/*	$NetBSD: if_wm.c,v 1.393 2016/05/06 08:57:43 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.393 2016/05/06 08:57:43 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
#define	WM_DEBUG_INIT		0x40
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
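
/*
 * Illustrative use of DPRINTF() (not part of the driver): the printf
 * argument list is passed as one parenthesized macro argument, so
 * calls take double parentheses, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * When WM_DEBUG is not defined, the macro expands to nothing.
 */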

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NTXINTR		16
#define WM_MAX_NRXINTR		16
#define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

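/*
 * Illustrative sketch (not part of the driver): WM_NEXTTX() and
 * WM_NEXTTXS() above rely on the ring sizes being powers of two, so
 * advancing an index is a mask operation rather than a modulo.  For a
 * 4096-entry ring, (4095 + 1) & (4096 - 1) == 0, wrapping to the start.
 */
#if 0
static int
wm_nexttx_example(int x)
{

	/* Equivalent to (x + 1) % WM_NTXDESC_82544, but cheaper. */
	return (x + 1) & (WM_NTXDESC_82544 - 1);
}
#endif
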
/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
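
/*
 * Worked example for the sizing comment above (assuming a 9018-byte
 * jumbo frame): each buffer holds MCLBYTES (2048) bytes, so one such
 * frame spans 5 buffers, and 256 descriptors therefore hold roughly
 * 256 / 5 = 51 in-flight jumbo packets, matching the "room for 50"
 * figure quoted above.
 */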

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of transmit queues */
	int txq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of receive queues */
	int rxq_intr_idx;		/* index of MSI-X tables */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped(too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
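
/*
 * Illustrative use of the chain macros above (not part of the driver):
 * rxq_tailp always points at the m_next slot to fill next, so an mbuf
 * is appended in O(1) without walking the chain.
 */
#if 0
static void
wm_rxchain_example(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2, tailp = &m2->m_next */
}
#endif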

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
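
/*
 * Worked example for the address-split macros above: for a 64-bit
 * bus_addr_t of 0x0000000123456000, the _LO() macros yield 0x23456000
 * and the _HI() macros yield 0x1.  When bus_addr_t is 32 bits wide,
 * the sizeof() test is decided at compile time and the high half is
 * simply 0.
 */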

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading via the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};
   1314 
   1315 #ifdef WM_EVENT_COUNTERS
   1316 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1317 #endif /* WM_EVENT_COUNTERS */
   1318 
   1319 
   1320 /*
   1321  * Register read/write functions.
   1322  * Other than CSR_{READ|WRITE}().
   1323  */
   1324 
   1325 #if 0 /* Not currently used */
   1326 static inline uint32_t
   1327 wm_io_read(struct wm_softc *sc, int reg)
   1328 {
   1329 
   1330 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1331 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1332 }
   1333 #endif
   1334 
   1335 static inline void
   1336 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1337 {
   1338 
   1339 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1340 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1341 }
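
/*
 * Both helpers above drive the same two-register indirect window:
 * offset 0 in I/O space latches the register address and offset 4 is
 * the data window.  A minimal usage sketch follows (illustrative
 * only; the guard flag and register are ones this driver really uses
 * elsewhere):
 */
#if 0
	/* Mirror the CTRL value out the I/O window, if it was mapped. */
	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif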
   1342 
   1343 static inline void
   1344 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1345     uint32_t data)
   1346 {
   1347 	uint32_t regval;
   1348 	int i;
   1349 
   1350 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1351 
   1352 	CSR_WRITE(sc, reg, regval);
   1353 
   1354 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1355 		delay(5);
   1356 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1357 			break;
   1358 	}
   1359 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1360 		aprint_error("%s: WARNING:"
   1361 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1362 		    device_xname(sc->sc_dev), reg);
   1363 	}
   1364 }
   1365 
   1366 static inline void
   1367 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1368 {
   1369 	wa->wa_low = htole32(v & 0xffffffffU);
   1370 	if (sizeof(bus_addr_t) == 8)
   1371 		wa->wa_high = htole32((uint64_t) v >> 32);
   1372 	else
   1373 		wa->wa_high = 0;
   1374 }
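
/*
 * Example: with a 64-bit bus_addr_t, the DMA address 0x923456780 is
 * stored as wa_low = 0x23456780 and wa_high = 0x9 (both converted to
 * little-endian); on a 32-bit bus_addr_t build, wa_high is simply 0.
 */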
   1375 
   1376 /*
   1377  * Descriptor sync/init functions.
   1378  */
   1379 static inline void
   1380 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1381 {
   1382 	struct wm_softc *sc = txq->txq_sc;
   1383 
   1384 	/* If it will wrap around, sync to the end of the ring. */
   1385 	if ((start + num) > WM_NTXDESC(txq)) {
   1386 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1387 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1388 		    (WM_NTXDESC(txq) - start), ops);
   1389 		num -= (WM_NTXDESC(txq) - start);
   1390 		start = 0;
   1391 	}
   1392 
   1393 	/* Now sync whatever is left. */
   1394 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1395 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1396 }
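
/*
 * Worked example of the wrap handling above: with an illustrative
 * 256-descriptor ring, start = 250 and num = 10 first sync
 * descriptors 250..255 (six of them), then fall through and sync
 * descriptors 0..3 with the remaining four.
 */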
   1397 
   1398 static inline void
   1399 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1400 {
   1401 	struct wm_softc *sc = rxq->rxq_sc;
   1402 
   1403 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1404 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1405 }
   1406 
   1407 static inline void
   1408 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1409 {
   1410 	struct wm_softc *sc = rxq->rxq_sc;
   1411 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1412 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1413 	struct mbuf *m = rxs->rxs_mbuf;
   1414 
   1415 	/*
   1416 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1417 	 * so that the payload after the Ethernet header is aligned
   1418 	 * to a 4-byte boundary.
   1419 
    1420 	 *
   1421 	 * The stupid chip uses the same size for every buffer, which
   1422 	 * is set in the Receive Control register.  We are using the 2K
   1423 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1424 	 * reason, we can't "scoot" packets longer than the standard
   1425 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1426 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1427 	 * the upper layer copy the headers.
   1428 	 */
   1429 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1430 
   1431 	wm_set_dma_addr(&rxd->wrx_addr,
   1432 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1433 	rxd->wrx_len = 0;
   1434 	rxd->wrx_cksum = 0;
   1435 	rxd->wrx_status = 0;
   1436 	rxd->wrx_errors = 0;
   1437 	rxd->wrx_special = 0;
   1438 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1439 
   1440 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1441 }
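
/*
 * Why the align_tweak of 2 used above works: the Ethernet header is
 * 14 bytes, so starting the frame at offset 2 puts the IP header at
 * offset 16, which is 4-byte aligned.  At offset 0 the IP header
 * would land at offset 14, and strict-alignment CPUs would fault on
 * the 32-bit loads in the network stack.
 */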
   1442 
   1443 /*
   1444  * Device driver interface functions and commonly used functions.
   1445  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1446  */
   1447 
   1448 /* Lookup supported device table */
   1449 static const struct wm_product *
   1450 wm_lookup(const struct pci_attach_args *pa)
   1451 {
   1452 	const struct wm_product *wmp;
   1453 
   1454 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1455 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1456 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1457 			return wmp;
   1458 	}
   1459 	return NULL;
   1460 }
   1461 
   1462 /* The match function (ca_match) */
   1463 static int
   1464 wm_match(device_t parent, cfdata_t cf, void *aux)
   1465 {
   1466 	struct pci_attach_args *pa = aux;
   1467 
   1468 	if (wm_lookup(pa) != NULL)
   1469 		return 1;
   1470 
   1471 	return 0;
   1472 }
   1473 
   1474 /* The attach function (ca_attach) */
   1475 static void
   1476 wm_attach(device_t parent, device_t self, void *aux)
   1477 {
   1478 	struct wm_softc *sc = device_private(self);
   1479 	struct pci_attach_args *pa = aux;
   1480 	prop_dictionary_t dict;
   1481 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1482 	pci_chipset_tag_t pc = pa->pa_pc;
   1483 	int counts[PCI_INTR_TYPE_SIZE];
   1484 	pci_intr_type_t max_type;
   1485 	const char *eetype, *xname;
   1486 	bus_space_tag_t memt;
   1487 	bus_space_handle_t memh;
   1488 	bus_size_t memsize;
   1489 	int memh_valid;
   1490 	int i, error;
   1491 	const struct wm_product *wmp;
   1492 	prop_data_t ea;
   1493 	prop_number_t pn;
   1494 	uint8_t enaddr[ETHER_ADDR_LEN];
   1495 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1496 	pcireg_t preg, memtype;
   1497 	uint16_t eeprom_data, apme_mask;
   1498 	bool force_clear_smbi;
   1499 	uint32_t link_mode;
   1500 	uint32_t reg;
   1501 
   1502 	sc->sc_dev = self;
   1503 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1504 	sc->sc_stopping = false;
   1505 
   1506 	wmp = wm_lookup(pa);
   1507 #ifdef DIAGNOSTIC
   1508 	if (wmp == NULL) {
   1509 		printf("\n");
   1510 		panic("wm_attach: impossible");
   1511 	}
   1512 #endif
   1513 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1514 
   1515 	sc->sc_pc = pa->pa_pc;
   1516 	sc->sc_pcitag = pa->pa_tag;
   1517 
   1518 	if (pci_dma64_available(pa))
   1519 		sc->sc_dmat = pa->pa_dmat64;
   1520 	else
   1521 		sc->sc_dmat = pa->pa_dmat;
   1522 
   1523 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1524 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1525 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1526 
   1527 	sc->sc_type = wmp->wmp_type;
   1528 	if (sc->sc_type < WM_T_82543) {
   1529 		if (sc->sc_rev < 2) {
   1530 			aprint_error_dev(sc->sc_dev,
   1531 			    "i82542 must be at least rev. 2\n");
   1532 			return;
   1533 		}
   1534 		if (sc->sc_rev < 3)
   1535 			sc->sc_type = WM_T_82542_2_0;
   1536 	}
   1537 
   1538 	/*
   1539 	 * Disable MSI for Errata:
   1540 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1541 	 *
   1542 	 *  82544: Errata 25
   1543 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1544 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1545 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1546 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1547 	 *
   1548 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1549 	 *
   1550 	 *  82571 & 82572: Errata 63
   1551 	 */
   1552 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1553 	    || (sc->sc_type == WM_T_82572))
   1554 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1555 
   1556 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1557 	    || (sc->sc_type == WM_T_82580)
   1558 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1559 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1560 		sc->sc_flags |= WM_F_NEWQUEUE;
   1561 
   1562 	/* Set device properties (mactype) */
   1563 	dict = device_properties(sc->sc_dev);
   1564 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1565 
   1566 	/*
    1567 	 * Map the device.  All devices support memory-mapped access,
   1568 	 * and it is really required for normal operation.
   1569 	 */
   1570 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1571 	switch (memtype) {
   1572 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1573 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1574 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1575 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1576 		break;
   1577 	default:
   1578 		memh_valid = 0;
   1579 		break;
   1580 	}
   1581 
   1582 	if (memh_valid) {
   1583 		sc->sc_st = memt;
   1584 		sc->sc_sh = memh;
   1585 		sc->sc_ss = memsize;
   1586 	} else {
   1587 		aprint_error_dev(sc->sc_dev,
   1588 		    "unable to map device registers\n");
   1589 		return;
   1590 	}
   1591 
   1592 	/*
   1593 	 * In addition, i82544 and later support I/O mapped indirect
   1594 	 * register access.  It is not desirable (nor supported in
   1595 	 * this driver) to use it for normal operation, though it is
   1596 	 * required to work around bugs in some chip versions.
   1597 	 */
   1598 	if (sc->sc_type >= WM_T_82544) {
   1599 		/* First we have to find the I/O BAR. */
   1600 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1601 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1602 			if (memtype == PCI_MAPREG_TYPE_IO)
   1603 				break;
   1604 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1605 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1606 				i += 4;	/* skip high bits, too */
   1607 		}
   1608 		if (i < PCI_MAPREG_END) {
   1609 			/*
    1610 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1611 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1612 			 * That's not a problem, because newer chips don't
    1613 			 * have this bug.
    1614 			 *
    1615 			 * The i8254x apparently doesn't respond when the
    1616 			 * I/O BAR is 0, which makes it look as if it hasn't
    1617 			 * been configured.
   1618 			 */
   1619 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1620 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1621 				aprint_error_dev(sc->sc_dev,
   1622 				    "WARNING: I/O BAR at zero.\n");
   1623 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1624 					0, &sc->sc_iot, &sc->sc_ioh,
   1625 					NULL, &sc->sc_ios) == 0) {
   1626 				sc->sc_flags |= WM_F_IOH_VALID;
   1627 			} else {
   1628 				aprint_error_dev(sc->sc_dev,
   1629 				    "WARNING: unable to map I/O space\n");
   1630 			}
   1631 		}
   1632 
   1633 	}
   1634 
   1635 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1636 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1637 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1638 	if (sc->sc_type < WM_T_82542_2_1)
   1639 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1640 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1641 
   1642 	/* power up chip */
   1643 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1644 	    NULL)) && error != EOPNOTSUPP) {
   1645 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1646 		return;
   1647 	}
   1648 
   1649 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1650 
   1651 	/* Allocation settings */
   1652 	max_type = PCI_INTR_TYPE_MSIX;
   1653 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1654 	counts[PCI_INTR_TYPE_MSI] = 1;
   1655 	counts[PCI_INTR_TYPE_INTX] = 1;
   1656 
   1657 alloc_retry:
   1658 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1659 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1660 		return;
   1661 	}
   1662 
   1663 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1664 		error = wm_setup_msix(sc);
   1665 		if (error) {
   1666 			pci_intr_release(pc, sc->sc_intrs,
   1667 			    counts[PCI_INTR_TYPE_MSIX]);
   1668 
   1669 			/* Setup for MSI: Disable MSI-X */
   1670 			max_type = PCI_INTR_TYPE_MSI;
   1671 			counts[PCI_INTR_TYPE_MSI] = 1;
   1672 			counts[PCI_INTR_TYPE_INTX] = 1;
   1673 			goto alloc_retry;
   1674 		}
    1675 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1676 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1677 		error = wm_setup_legacy(sc);
   1678 		if (error) {
   1679 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1680 			    counts[PCI_INTR_TYPE_MSI]);
   1681 
   1682 			/* The next try is for INTx: Disable MSI */
   1683 			max_type = PCI_INTR_TYPE_INTX;
   1684 			counts[PCI_INTR_TYPE_INTX] = 1;
   1685 			goto alloc_retry;
   1686 		}
   1687 	} else {
   1688 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1689 		error = wm_setup_legacy(sc);
   1690 		if (error) {
   1691 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1692 			    counts[PCI_INTR_TYPE_INTX]);
   1693 			return;
   1694 		}
   1695 	}
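	/*
	 * To summarize the ladder above: first try MSI-X with one vector
	 * per TX queue, one per RX queue and one extra (the link vector);
	 * if wm_setup_msix() fails, release the vectors and retry with a
	 * single MSI; if that fails too, fall back to a shared INTx line.
	 * Only the MSI-X path can use multiple queues.
	 */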
   1696 
   1697 	/*
   1698 	 * Check the function ID (unit number of the chip).
   1699 	 */
   1700 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1701 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1702 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1703 	    || (sc->sc_type == WM_T_82580)
   1704 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1705 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1706 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1707 	else
   1708 		sc->sc_funcid = 0;
   1709 
   1710 	/*
   1711 	 * Determine a few things about the bus we're connected to.
   1712 	 */
   1713 	if (sc->sc_type < WM_T_82543) {
   1714 		/* We don't really know the bus characteristics here. */
   1715 		sc->sc_bus_speed = 33;
   1716 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1717 		/*
    1718 		 * CSA (Communication Streaming Architecture) is about as
    1719 		 * fast as a 32-bit 66MHz PCI bus.
   1720 		 */
   1721 		sc->sc_flags |= WM_F_CSA;
   1722 		sc->sc_bus_speed = 66;
   1723 		aprint_verbose_dev(sc->sc_dev,
   1724 		    "Communication Streaming Architecture\n");
   1725 		if (sc->sc_type == WM_T_82547) {
   1726 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1727 			callout_setfunc(&sc->sc_txfifo_ch,
   1728 					wm_82547_txfifo_stall, sc);
   1729 			aprint_verbose_dev(sc->sc_dev,
   1730 			    "using 82547 Tx FIFO stall work-around\n");
   1731 		}
   1732 	} else if (sc->sc_type >= WM_T_82571) {
   1733 		sc->sc_flags |= WM_F_PCIE;
   1734 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1735 		    && (sc->sc_type != WM_T_ICH10)
   1736 		    && (sc->sc_type != WM_T_PCH)
   1737 		    && (sc->sc_type != WM_T_PCH2)
   1738 		    && (sc->sc_type != WM_T_PCH_LPT)
   1739 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1740 			/* ICH* and PCH* have no PCIe capability registers */
   1741 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1742 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1743 				NULL) == 0)
   1744 				aprint_error_dev(sc->sc_dev,
   1745 				    "unable to find PCIe capability\n");
   1746 		}
   1747 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1748 	} else {
   1749 		reg = CSR_READ(sc, WMREG_STATUS);
   1750 		if (reg & STATUS_BUS64)
   1751 			sc->sc_flags |= WM_F_BUS64;
   1752 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1753 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1754 
   1755 			sc->sc_flags |= WM_F_PCIX;
   1756 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1757 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1758 				aprint_error_dev(sc->sc_dev,
   1759 				    "unable to find PCIX capability\n");
   1760 			else if (sc->sc_type != WM_T_82545_3 &&
   1761 				 sc->sc_type != WM_T_82546_3) {
   1762 				/*
   1763 				 * Work around a problem caused by the BIOS
   1764 				 * setting the max memory read byte count
   1765 				 * incorrectly.
   1766 				 */
   1767 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1768 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1769 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1770 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1771 
   1772 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1773 				    PCIX_CMD_BYTECNT_SHIFT;
   1774 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1775 				    PCIX_STATUS_MAXB_SHIFT;
   1776 				if (bytecnt > maxb) {
   1777 					aprint_verbose_dev(sc->sc_dev,
   1778 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1779 					    512 << bytecnt, 512 << maxb);
   1780 					pcix_cmd = (pcix_cmd &
   1781 					    ~PCIX_CMD_BYTECNT_MASK) |
   1782 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1783 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1784 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1785 					    pcix_cmd);
   1786 				}
   1787 			}
   1788 		}
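		/*
		 * MMRBC encoding example for the workaround above: the
		 * byte-count fields encode 512 << n bytes (512, 1024,
		 * 2048, 4096).  If the BIOS programmed bytecnt = 3 (4096)
		 * but the device only supports maxb = 1 (1024), the code
		 * above rewrites the command register down to 1024 bytes.
		 */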
   1789 		/*
   1790 		 * The quad port adapter is special; it has a PCIX-PCIX
   1791 		 * bridge on the board, and can run the secondary bus at
   1792 		 * a higher speed.
   1793 		 */
   1794 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1795 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1796 								      : 66;
   1797 		} else if (sc->sc_flags & WM_F_PCIX) {
   1798 			switch (reg & STATUS_PCIXSPD_MASK) {
   1799 			case STATUS_PCIXSPD_50_66:
   1800 				sc->sc_bus_speed = 66;
   1801 				break;
   1802 			case STATUS_PCIXSPD_66_100:
   1803 				sc->sc_bus_speed = 100;
   1804 				break;
   1805 			case STATUS_PCIXSPD_100_133:
   1806 				sc->sc_bus_speed = 133;
   1807 				break;
   1808 			default:
   1809 				aprint_error_dev(sc->sc_dev,
   1810 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1811 				    reg & STATUS_PCIXSPD_MASK);
   1812 				sc->sc_bus_speed = 66;
   1813 				break;
   1814 			}
   1815 		} else
   1816 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1817 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1818 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1819 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1820 	}
   1821 
   1822 	/* clear interesting stat counters */
   1823 	CSR_READ(sc, WMREG_COLC);
   1824 	CSR_READ(sc, WMREG_RXERRC);
   1825 
   1826 	/* get PHY control from SMBus to PCIe */
   1827 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1828 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1829 		wm_smbustopci(sc);
   1830 
   1831 	/* Reset the chip to a known state. */
   1832 	wm_reset(sc);
   1833 
   1834 	/* Get some information about the EEPROM. */
   1835 	switch (sc->sc_type) {
   1836 	case WM_T_82542_2_0:
   1837 	case WM_T_82542_2_1:
   1838 	case WM_T_82543:
   1839 	case WM_T_82544:
   1840 		/* Microwire */
   1841 		sc->sc_nvm_wordsize = 64;
   1842 		sc->sc_nvm_addrbits = 6;
   1843 		break;
   1844 	case WM_T_82540:
   1845 	case WM_T_82545:
   1846 	case WM_T_82545_3:
   1847 	case WM_T_82546:
   1848 	case WM_T_82546_3:
   1849 		/* Microwire */
   1850 		reg = CSR_READ(sc, WMREG_EECD);
   1851 		if (reg & EECD_EE_SIZE) {
   1852 			sc->sc_nvm_wordsize = 256;
   1853 			sc->sc_nvm_addrbits = 8;
   1854 		} else {
   1855 			sc->sc_nvm_wordsize = 64;
   1856 			sc->sc_nvm_addrbits = 6;
   1857 		}
   1858 		sc->sc_flags |= WM_F_LOCK_EECD;
   1859 		break;
   1860 	case WM_T_82541:
   1861 	case WM_T_82541_2:
   1862 	case WM_T_82547:
   1863 	case WM_T_82547_2:
   1864 		sc->sc_flags |= WM_F_LOCK_EECD;
   1865 		reg = CSR_READ(sc, WMREG_EECD);
   1866 		if (reg & EECD_EE_TYPE) {
   1867 			/* SPI */
   1868 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1869 			wm_nvm_set_addrbits_size_eecd(sc);
   1870 		} else {
   1871 			/* Microwire */
   1872 			if ((reg & EECD_EE_ABITS) != 0) {
   1873 				sc->sc_nvm_wordsize = 256;
   1874 				sc->sc_nvm_addrbits = 8;
   1875 			} else {
   1876 				sc->sc_nvm_wordsize = 64;
   1877 				sc->sc_nvm_addrbits = 6;
   1878 			}
   1879 		}
   1880 		break;
   1881 	case WM_T_82571:
   1882 	case WM_T_82572:
   1883 		/* SPI */
   1884 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1885 		wm_nvm_set_addrbits_size_eecd(sc);
   1886 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1887 		break;
   1888 	case WM_T_82573:
   1889 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1890 		/* FALLTHROUGH */
   1891 	case WM_T_82574:
   1892 	case WM_T_82583:
   1893 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1894 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1895 			sc->sc_nvm_wordsize = 2048;
   1896 		} else {
   1897 			/* SPI */
   1898 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1899 			wm_nvm_set_addrbits_size_eecd(sc);
   1900 		}
   1901 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1902 		break;
   1903 	case WM_T_82575:
   1904 	case WM_T_82576:
   1905 	case WM_T_82580:
   1906 	case WM_T_I350:
   1907 	case WM_T_I354:
   1908 	case WM_T_80003:
   1909 		/* SPI */
   1910 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1911 		wm_nvm_set_addrbits_size_eecd(sc);
   1912 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1913 		    | WM_F_LOCK_SWSM;
   1914 		break;
   1915 	case WM_T_ICH8:
   1916 	case WM_T_ICH9:
   1917 	case WM_T_ICH10:
   1918 	case WM_T_PCH:
   1919 	case WM_T_PCH2:
   1920 	case WM_T_PCH_LPT:
   1921 		/* FLASH */
   1922 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1923 		sc->sc_nvm_wordsize = 2048;
   1924 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   1925 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1926 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1927 			aprint_error_dev(sc->sc_dev,
   1928 			    "can't map FLASH registers\n");
   1929 			goto out;
   1930 		}
   1931 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1932 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1933 		    ICH_FLASH_SECTOR_SIZE;
   1934 		sc->sc_ich8_flash_bank_size =
   1935 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1936 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   1937 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1938 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1939 		sc->sc_flashreg_offset = 0;
   1940 		break;
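		/*
		 * GFPREG arithmetic, with illustrative values: the low
		 * bits give the first sector of the flash region and the
		 * bits above 16 give the last one.  With base = 1,
		 * limit = 16 and the usual 4KB sector size, the region is
		 * 16 sectors (64KB) starting at 4KB; split over two banks
		 * and two bytes per word, that is 16K 16-bit words per
		 * bank.
		 */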
   1941 	case WM_T_PCH_SPT:
   1942 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   1943 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1944 		sc->sc_flasht = sc->sc_st;
   1945 		sc->sc_flashh = sc->sc_sh;
   1946 		sc->sc_ich8_flash_base = 0;
   1947 		sc->sc_nvm_wordsize =
   1948 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   1949 			* NVM_SIZE_MULTIPLIER;
    1950 		/* The size is in bytes; we want it in words */
   1951 		sc->sc_nvm_wordsize /= 2;
   1952 		/* assume 2 banks */
   1953 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   1954 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   1955 		break;
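		/*
		 * Example of the STRAP math above, assuming the usual 4KB
		 * NVM_SIZE_MULTIPLIER: a strap field of 3 gives
		 * (3 + 1) * 4096 = 16KB of flash, i.e. 8192 16-bit words,
		 * and a per-bank size of 4096 words.
		 */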
   1956 	case WM_T_I210:
   1957 	case WM_T_I211:
   1958 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1959 			wm_nvm_set_addrbits_size_eecd(sc);
   1960 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1961 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1962 		} else {
   1963 			sc->sc_nvm_wordsize = INVM_SIZE;
   1964 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1965 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1966 		}
   1967 		break;
   1968 	default:
   1969 		break;
   1970 	}
   1971 
   1972 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1973 	switch (sc->sc_type) {
   1974 	case WM_T_82571:
   1975 	case WM_T_82572:
   1976 		reg = CSR_READ(sc, WMREG_SWSM2);
   1977 		if ((reg & SWSM2_LOCK) == 0) {
   1978 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1979 			force_clear_smbi = true;
   1980 		} else
   1981 			force_clear_smbi = false;
   1982 		break;
   1983 	case WM_T_82573:
   1984 	case WM_T_82574:
   1985 	case WM_T_82583:
   1986 		force_clear_smbi = true;
   1987 		break;
   1988 	default:
   1989 		force_clear_smbi = false;
   1990 		break;
   1991 	}
   1992 	if (force_clear_smbi) {
   1993 		reg = CSR_READ(sc, WMREG_SWSM);
   1994 		if ((reg & SWSM_SMBI) != 0)
   1995 			aprint_error_dev(sc->sc_dev,
   1996 			    "Please update the Bootagent\n");
   1997 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1998 	}
   1999 
   2000 	/*
    2001 	 * Defer printing the EEPROM type until after verifying the checksum.
   2002 	 * This allows the EEPROM type to be printed correctly in the case
   2003 	 * that no EEPROM is attached.
   2004 	 */
   2005 	/*
   2006 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2007 	 * this for later, so we can fail future reads from the EEPROM.
   2008 	 */
   2009 	if (wm_nvm_validate_checksum(sc)) {
   2010 		/*
    2011 		 * Check again, because some PCI-e parts fail the
    2012 		 * first check due to the link being in a sleep state.
   2013 		 */
   2014 		if (wm_nvm_validate_checksum(sc))
   2015 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2016 	}
   2017 
   2018 	/* Set device properties (macflags) */
   2019 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2020 
   2021 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2022 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2023 	else {
   2024 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2025 		    sc->sc_nvm_wordsize);
   2026 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2027 			aprint_verbose("iNVM");
   2028 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2029 			aprint_verbose("FLASH(HW)");
   2030 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2031 			aprint_verbose("FLASH");
   2032 		else {
   2033 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2034 				eetype = "SPI";
   2035 			else
   2036 				eetype = "MicroWire";
   2037 			aprint_verbose("(%d address bits) %s EEPROM",
   2038 			    sc->sc_nvm_addrbits, eetype);
   2039 		}
   2040 	}
   2041 	wm_nvm_version(sc);
   2042 	aprint_verbose("\n");
   2043 
   2044 	/* Check for I21[01] PLL workaround */
   2045 	if (sc->sc_type == WM_T_I210)
   2046 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2047 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2048 		/* NVM image release 3.25 has a workaround */
   2049 		if ((sc->sc_nvm_ver_major < 3)
   2050 		    || ((sc->sc_nvm_ver_major == 3)
   2051 			&& (sc->sc_nvm_ver_minor < 25))) {
   2052 			aprint_verbose_dev(sc->sc_dev,
   2053 			    "ROM image version %d.%d is older than 3.25\n",
   2054 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2055 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2056 		}
   2057 	}
   2058 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2059 		wm_pll_workaround_i210(sc);
   2060 
   2061 	wm_get_wakeup(sc);
   2062 	switch (sc->sc_type) {
   2063 	case WM_T_82571:
   2064 	case WM_T_82572:
   2065 	case WM_T_82573:
   2066 	case WM_T_82574:
   2067 	case WM_T_82583:
   2068 	case WM_T_80003:
   2069 	case WM_T_ICH8:
   2070 	case WM_T_ICH9:
   2071 	case WM_T_ICH10:
   2072 	case WM_T_PCH:
   2073 	case WM_T_PCH2:
   2074 	case WM_T_PCH_LPT:
   2075 	case WM_T_PCH_SPT:
   2076 		/* Non-AMT based hardware can now take control from firmware */
   2077 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2078 			wm_get_hw_control(sc);
   2079 		break;
   2080 	default:
   2081 		break;
   2082 	}
   2083 
   2084 	/*
   2085 	 * Read the Ethernet address from the EEPROM, if not first found
   2086 	 * in device properties.
   2087 	 */
   2088 	ea = prop_dictionary_get(dict, "mac-address");
   2089 	if (ea != NULL) {
   2090 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2091 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2092 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2093 	} else {
   2094 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2095 			aprint_error_dev(sc->sc_dev,
   2096 			    "unable to read Ethernet address\n");
   2097 			goto out;
   2098 		}
   2099 	}
   2100 
   2101 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2102 	    ether_sprintf(enaddr));
   2103 
   2104 	/*
   2105 	 * Read the config info from the EEPROM, and set up various
   2106 	 * bits in the control registers based on their contents.
   2107 	 */
   2108 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2109 	if (pn != NULL) {
   2110 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2111 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2112 	} else {
   2113 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2114 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2115 			goto out;
   2116 		}
   2117 	}
   2118 
   2119 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2120 	if (pn != NULL) {
   2121 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2122 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2123 	} else {
   2124 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2125 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2126 			goto out;
   2127 		}
   2128 	}
   2129 
   2130 	/* check for WM_F_WOL */
   2131 	switch (sc->sc_type) {
   2132 	case WM_T_82542_2_0:
   2133 	case WM_T_82542_2_1:
   2134 	case WM_T_82543:
   2135 		/* dummy? */
   2136 		eeprom_data = 0;
   2137 		apme_mask = NVM_CFG3_APME;
   2138 		break;
   2139 	case WM_T_82544:
   2140 		apme_mask = NVM_CFG2_82544_APM_EN;
   2141 		eeprom_data = cfg2;
   2142 		break;
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 	case WM_T_82571:
   2146 	case WM_T_82572:
   2147 	case WM_T_82573:
   2148 	case WM_T_82574:
   2149 	case WM_T_82583:
   2150 	case WM_T_80003:
   2151 	default:
   2152 		apme_mask = NVM_CFG3_APME;
   2153 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2154 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2155 		break;
   2156 	case WM_T_82575:
   2157 	case WM_T_82576:
   2158 	case WM_T_82580:
   2159 	case WM_T_I350:
   2160 	case WM_T_I354: /* XXX ok? */
   2161 	case WM_T_ICH8:
   2162 	case WM_T_ICH9:
   2163 	case WM_T_ICH10:
   2164 	case WM_T_PCH:
   2165 	case WM_T_PCH2:
   2166 	case WM_T_PCH_LPT:
   2167 	case WM_T_PCH_SPT:
   2168 		/* XXX The funcid should be checked on some devices */
   2169 		apme_mask = WUC_APME;
   2170 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2171 		break;
   2172 	}
   2173 
   2174 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2175 	if ((eeprom_data & apme_mask) != 0)
   2176 		sc->sc_flags |= WM_F_WOL;
   2177 #ifdef WM_DEBUG
   2178 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2179 		printf("WOL\n");
   2180 #endif
   2181 
   2182 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2183 		/* Check NVM for autonegotiation */
   2184 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2185 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2186 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2187 		}
   2188 	}
   2189 
   2190 	/*
    2191 	 * XXX need special handling for some multiple-port cards
    2192 	 * to disable a particular port.
   2193 	 */
   2194 
   2195 	if (sc->sc_type >= WM_T_82544) {
   2196 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2197 		if (pn != NULL) {
   2198 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2199 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2200 		} else {
   2201 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2202 				aprint_error_dev(sc->sc_dev,
   2203 				    "unable to read SWDPIN\n");
   2204 				goto out;
   2205 			}
   2206 		}
   2207 	}
   2208 
   2209 	if (cfg1 & NVM_CFG1_ILOS)
   2210 		sc->sc_ctrl |= CTRL_ILOS;
   2211 
   2212 	/*
   2213 	 * XXX
    2214 	 * This code isn't correct, because pins 2 and 3 are located
    2215 	 * in different positions on newer chips. Check all the datasheets.
    2216 	 *
    2217 	 * Until this problem is resolved, only do this on chips < 82580.
   2218 	 */
   2219 	if (sc->sc_type <= WM_T_82580) {
   2220 		if (sc->sc_type >= WM_T_82544) {
   2221 			sc->sc_ctrl |=
   2222 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2223 			    CTRL_SWDPIO_SHIFT;
   2224 			sc->sc_ctrl |=
   2225 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2226 			    CTRL_SWDPINS_SHIFT;
   2227 		} else {
   2228 			sc->sc_ctrl |=
   2229 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2230 			    CTRL_SWDPIO_SHIFT;
   2231 		}
   2232 	}
   2233 
   2234 	/* XXX For other than 82580? */
   2235 	if (sc->sc_type == WM_T_82580) {
   2236 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2237 		if (nvmword & __BIT(13))
   2238 			sc->sc_ctrl |= CTRL_ILOS;
   2239 	}
   2240 
   2241 #if 0
   2242 	if (sc->sc_type >= WM_T_82544) {
   2243 		if (cfg1 & NVM_CFG1_IPS0)
   2244 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2245 		if (cfg1 & NVM_CFG1_IPS1)
   2246 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2247 		sc->sc_ctrl_ext |=
   2248 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2249 		    CTRL_EXT_SWDPIO_SHIFT;
   2250 		sc->sc_ctrl_ext |=
   2251 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2252 		    CTRL_EXT_SWDPINS_SHIFT;
   2253 	} else {
   2254 		sc->sc_ctrl_ext |=
   2255 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2256 		    CTRL_EXT_SWDPIO_SHIFT;
   2257 	}
   2258 #endif
   2259 
   2260 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2261 #if 0
   2262 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2263 #endif
   2264 
   2265 	if (sc->sc_type == WM_T_PCH) {
   2266 		uint16_t val;
   2267 
   2268 		/* Save the NVM K1 bit setting */
   2269 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2270 
   2271 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2272 			sc->sc_nvm_k1_enabled = 1;
   2273 		else
   2274 			sc->sc_nvm_k1_enabled = 0;
   2275 	}
   2276 
   2277 	/*
    2278 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2279 	 * media structures accordingly.
   2280 	 */
   2281 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2282 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2283 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2284 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2285 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2286 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2287 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2288 	} else if (sc->sc_type < WM_T_82543 ||
   2289 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2290 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2291 			aprint_error_dev(sc->sc_dev,
   2292 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2293 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2294 		}
   2295 		wm_tbi_mediainit(sc);
   2296 	} else {
   2297 		switch (sc->sc_type) {
   2298 		case WM_T_82575:
   2299 		case WM_T_82576:
   2300 		case WM_T_82580:
   2301 		case WM_T_I350:
   2302 		case WM_T_I354:
   2303 		case WM_T_I210:
   2304 		case WM_T_I211:
   2305 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2306 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2307 			switch (link_mode) {
   2308 			case CTRL_EXT_LINK_MODE_1000KX:
   2309 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2310 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2311 				break;
   2312 			case CTRL_EXT_LINK_MODE_SGMII:
   2313 				if (wm_sgmii_uses_mdio(sc)) {
   2314 					aprint_verbose_dev(sc->sc_dev,
   2315 					    "SGMII(MDIO)\n");
   2316 					sc->sc_flags |= WM_F_SGMII;
   2317 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2318 					break;
   2319 				}
   2320 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2321 				/*FALLTHROUGH*/
   2322 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2323 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2324 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2325 					if (link_mode
   2326 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2327 						sc->sc_mediatype
   2328 						    = WM_MEDIATYPE_COPPER;
   2329 						sc->sc_flags |= WM_F_SGMII;
   2330 					} else {
   2331 						sc->sc_mediatype
   2332 						    = WM_MEDIATYPE_SERDES;
   2333 						aprint_verbose_dev(sc->sc_dev,
   2334 						    "SERDES\n");
   2335 					}
   2336 					break;
   2337 				}
   2338 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2339 					aprint_verbose_dev(sc->sc_dev,
   2340 					    "SERDES\n");
   2341 
   2342 				/* Change current link mode setting */
   2343 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2344 				switch (sc->sc_mediatype) {
   2345 				case WM_MEDIATYPE_COPPER:
   2346 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2347 					break;
   2348 				case WM_MEDIATYPE_SERDES:
   2349 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2350 					break;
   2351 				default:
   2352 					break;
   2353 				}
   2354 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2355 				break;
   2356 			case CTRL_EXT_LINK_MODE_GMII:
   2357 			default:
   2358 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2359 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2360 				break;
   2361 			}
   2362 
   2363 			reg &= ~CTRL_EXT_I2C_ENA;
   2364 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2365 				reg |= CTRL_EXT_I2C_ENA;
   2366 			else
   2367 				reg &= ~CTRL_EXT_I2C_ENA;
   2368 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2369 
   2370 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2371 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2372 			else
   2373 				wm_tbi_mediainit(sc);
   2374 			break;
   2375 		default:
   2376 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2377 				aprint_error_dev(sc->sc_dev,
   2378 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2379 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2380 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2381 		}
   2382 	}
   2383 
   2384 	ifp = &sc->sc_ethercom.ec_if;
   2385 	xname = device_xname(sc->sc_dev);
   2386 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2387 	ifp->if_softc = sc;
   2388 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2389 	ifp->if_ioctl = wm_ioctl;
   2390 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2391 		ifp->if_start = wm_nq_start;
   2392 	else
   2393 		ifp->if_start = wm_start;
   2394 	ifp->if_watchdog = wm_watchdog;
   2395 	ifp->if_init = wm_init;
   2396 	ifp->if_stop = wm_stop;
   2397 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2398 	IFQ_SET_READY(&ifp->if_snd);
   2399 
   2400 	/* Check for jumbo frame */
   2401 	switch (sc->sc_type) {
   2402 	case WM_T_82573:
   2403 		/* XXX limited to 9234 if ASPM is disabled */
   2404 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2405 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2406 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2407 		break;
   2408 	case WM_T_82571:
   2409 	case WM_T_82572:
   2410 	case WM_T_82574:
   2411 	case WM_T_82575:
   2412 	case WM_T_82576:
   2413 	case WM_T_82580:
   2414 	case WM_T_I350:
   2415 	case WM_T_I354: /* XXXX ok? */
   2416 	case WM_T_I210:
   2417 	case WM_T_I211:
   2418 	case WM_T_80003:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 		/* XXX limited to 9234 */
   2425 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2426 		break;
   2427 	case WM_T_PCH:
   2428 		/* XXX limited to 4096 */
   2429 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2430 		break;
   2431 	case WM_T_82542_2_0:
   2432 	case WM_T_82542_2_1:
   2433 	case WM_T_82583:
   2434 	case WM_T_ICH8:
   2435 		/* No support for jumbo frame */
   2436 		break;
   2437 	default:
   2438 		/* ETHER_MAX_LEN_JUMBO */
   2439 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2440 		break;
   2441 	}
   2442 
    2443 	/* If we're an i82543 or greater, we can support VLANs. */
   2444 	if (sc->sc_type >= WM_T_82543)
   2445 		sc->sc_ethercom.ec_capabilities |=
   2446 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2447 
   2448 	/*
    2449 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2450 	 * on i82543 and later.
   2451 	 */
   2452 	if (sc->sc_type >= WM_T_82543) {
   2453 		ifp->if_capabilities |=
   2454 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2455 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2456 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2457 		    IFCAP_CSUM_TCPv6_Tx |
   2458 		    IFCAP_CSUM_UDPv6_Tx;
   2459 	}
   2460 
   2461 	/*
   2462 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2463 	 *
   2464 	 *	82541GI (8086:1076) ... no
   2465 	 *	82572EI (8086:10b9) ... yes
   2466 	 */
   2467 	if (sc->sc_type >= WM_T_82571) {
   2468 		ifp->if_capabilities |=
   2469 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2470 	}
   2471 
   2472 	/*
    2473 	 * If we're an i82544 or greater (except i82547), we can do
   2474 	 * TCP segmentation offload.
   2475 	 */
   2476 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2477 		ifp->if_capabilities |= IFCAP_TSOv4;
   2478 	}
   2479 
   2480 	if (sc->sc_type >= WM_T_82571) {
   2481 		ifp->if_capabilities |= IFCAP_TSOv6;
   2482 	}
   2483 
   2484 #ifdef WM_MPSAFE
   2485 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2486 #else
   2487 	sc->sc_core_lock = NULL;
   2488 #endif
   2489 
   2490 	/* Attach the interface. */
   2491 	if_initialize(ifp);
   2492 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2493 	ether_ifattach(ifp, enaddr);
   2494 	if_register(ifp);
   2495 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2496 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2497 			  RND_FLAG_DEFAULT);
   2498 
   2499 #ifdef WM_EVENT_COUNTERS
   2500 	/* Attach event counters. */
   2501 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2502 	    NULL, xname, "txsstall");
   2503 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2504 	    NULL, xname, "txdstall");
   2505 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2506 	    NULL, xname, "txfifo_stall");
   2507 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2508 	    NULL, xname, "txdw");
   2509 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2510 	    NULL, xname, "txqe");
   2511 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2512 	    NULL, xname, "rxintr");
   2513 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2514 	    NULL, xname, "linkintr");
   2515 
   2516 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2517 	    NULL, xname, "rxipsum");
   2518 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2519 	    NULL, xname, "rxtusum");
   2520 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2521 	    NULL, xname, "txipsum");
   2522 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2523 	    NULL, xname, "txtusum");
   2524 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2525 	    NULL, xname, "txtusum6");
   2526 
   2527 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2528 	    NULL, xname, "txtso");
   2529 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2530 	    NULL, xname, "txtso6");
   2531 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2532 	    NULL, xname, "txtsopain");
   2533 
   2534 	for (i = 0; i < WM_NTXSEGS; i++) {
   2535 		snprintf(wm_txseg_evcnt_names[i],
   2536 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2537 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2538 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2539 	}
   2540 
   2541 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2542 	    NULL, xname, "txdrop");
   2543 
   2544 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2545 	    NULL, xname, "tu");
   2546 
   2547 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2548 	    NULL, xname, "tx_xoff");
   2549 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2550 	    NULL, xname, "tx_xon");
   2551 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2552 	    NULL, xname, "rx_xoff");
   2553 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2554 	    NULL, xname, "rx_xon");
   2555 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2556 	    NULL, xname, "rx_macctl");
   2557 #endif /* WM_EVENT_COUNTERS */
   2558 
   2559 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2560 		pmf_class_network_register(self, ifp);
   2561 	else
   2562 		aprint_error_dev(self, "couldn't establish power handler\n");
   2563 
   2564 	sc->sc_flags |= WM_F_ATTACHED;
   2565  out:
   2566 	return;
   2567 }
   2568 
   2569 /* The detach function (ca_detach) */
   2570 static int
   2571 wm_detach(device_t self, int flags __unused)
   2572 {
   2573 	struct wm_softc *sc = device_private(self);
   2574 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2575 	int i;
   2576 #ifndef WM_MPSAFE
   2577 	int s;
   2578 #endif
   2579 
   2580 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2581 		return 0;
   2582 
   2583 #ifndef WM_MPSAFE
   2584 	s = splnet();
   2585 #endif
   2586 	/* Stop the interface. Callouts are stopped in it. */
   2587 	wm_stop(ifp, 1);
   2588 
   2589 #ifndef WM_MPSAFE
   2590 	splx(s);
   2591 #endif
   2592 
   2593 	pmf_device_deregister(self);
   2594 
   2595 	/* Tell the firmware about the release */
   2596 	WM_CORE_LOCK(sc);
   2597 	wm_release_manageability(sc);
   2598 	wm_release_hw_control(sc);
   2599 	WM_CORE_UNLOCK(sc);
   2600 
   2601 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2602 
   2603 	/* Delete all remaining media. */
   2604 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2605 
   2606 	ether_ifdetach(ifp);
   2607 	if_detach(ifp);
   2608 	if_percpuq_destroy(sc->sc_ipq);
   2609 
   2610 	/* Unload RX dmamaps and free mbufs */
   2611 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2612 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2613 		WM_RX_LOCK(rxq);
   2614 		wm_rxdrain(rxq);
   2615 		WM_RX_UNLOCK(rxq);
   2616 	}
   2617 	/* Must unlock here */
   2618 
   2619 	wm_free_txrx_queues(sc);
   2620 
   2621 	/* Disestablish the interrupt handler */
   2622 	for (i = 0; i < sc->sc_nintrs; i++) {
   2623 		if (sc->sc_ihs[i] != NULL) {
   2624 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2625 			sc->sc_ihs[i] = NULL;
   2626 		}
   2627 	}
   2628 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2629 
   2630 	/* Unmap the registers */
   2631 	if (sc->sc_ss) {
   2632 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2633 		sc->sc_ss = 0;
   2634 	}
   2635 	if (sc->sc_ios) {
   2636 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2637 		sc->sc_ios = 0;
   2638 	}
   2639 	if (sc->sc_flashs) {
   2640 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2641 		sc->sc_flashs = 0;
   2642 	}
   2643 
   2644 	if (sc->sc_core_lock)
   2645 		mutex_obj_free(sc->sc_core_lock);
   2646 
   2647 	return 0;
   2648 }
   2649 
   2650 static bool
   2651 wm_suspend(device_t self, const pmf_qual_t *qual)
   2652 {
   2653 	struct wm_softc *sc = device_private(self);
   2654 
   2655 	wm_release_manageability(sc);
   2656 	wm_release_hw_control(sc);
   2657 #ifdef WM_WOL
   2658 	wm_enable_wakeup(sc);
   2659 #endif
   2660 
   2661 	return true;
   2662 }
   2663 
   2664 static bool
   2665 wm_resume(device_t self, const pmf_qual_t *qual)
   2666 {
   2667 	struct wm_softc *sc = device_private(self);
   2668 
   2669 	wm_init_manageability(sc);
   2670 
   2671 	return true;
   2672 }
   2673 
   2674 /*
   2675  * wm_watchdog:		[ifnet interface function]
   2676  *
   2677  *	Watchdog timer handler.
   2678  */
   2679 static void
   2680 wm_watchdog(struct ifnet *ifp)
   2681 {
   2682 	struct wm_softc *sc = ifp->if_softc;
   2683 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2684 
   2685 	/*
   2686 	 * Since we're using delayed interrupts, sweep up
   2687 	 * before we report an error.
   2688 	 */
   2689 	WM_TX_LOCK(txq);
   2690 	wm_txeof(sc);
   2691 	WM_TX_UNLOCK(txq);
   2692 
   2693 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2694 #ifdef WM_DEBUG
   2695 		int i, j;
   2696 		struct wm_txsoft *txs;
   2697 #endif
   2698 		log(LOG_ERR,
   2699 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2700 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2701 		    txq->txq_next);
   2702 		ifp->if_oerrors++;
   2703 #ifdef WM_DEBUG
    2704 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2705 		    i = WM_NEXTTXS(txq, i)) {
    2706 			txs = &txq->txq_soft[i];
    2707 			printf("txs %d tx %d -> %d\n",
    2708 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2709 			for (j = txs->txs_firstdesc; ;
    2710 			    j = WM_NEXTTX(txq, j)) {
    2711 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2712 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2713 				printf("\t %#08x%08x\n",
    2714 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2715 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2716 				if (j == txs->txs_lastdesc)
    2717 					break;
    2718 			}
   2719 		}
   2720 #endif
   2721 		/* Reset the interface. */
   2722 		(void) wm_init(ifp);
   2723 	}
   2724 
   2725 	/* Try to get more packets going. */
   2726 	ifp->if_start(ifp);
   2727 }
   2728 
   2729 /*
   2730  * wm_tick:
   2731  *
   2732  *	One second timer, used to check link status, sweep up
   2733  *	completed transmit jobs, etc.
   2734  */
   2735 static void
   2736 wm_tick(void *arg)
   2737 {
   2738 	struct wm_softc *sc = arg;
   2739 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2740 #ifndef WM_MPSAFE
   2741 	int s;
   2742 
   2743 	s = splnet();
   2744 #endif
   2745 
   2746 	WM_CORE_LOCK(sc);
   2747 
   2748 	if (sc->sc_stopping)
   2749 		goto out;
   2750 
   2751 	if (sc->sc_type >= WM_T_82542_2_1) {
   2752 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2753 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2754 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2755 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2756 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2757 	}
   2758 
   2759 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2760 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2761 	    + CSR_READ(sc, WMREG_CRCERRS)
   2762 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2763 	    + CSR_READ(sc, WMREG_SYMERRC)
   2764 	    + CSR_READ(sc, WMREG_RXERRC)
   2765 	    + CSR_READ(sc, WMREG_SEC)
   2766 	    + CSR_READ(sc, WMREG_CEXTERR)
   2767 	    + CSR_READ(sc, WMREG_RLEC);
   2768 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2769 
   2770 	if (sc->sc_flags & WM_F_HAS_MII)
   2771 		mii_tick(&sc->sc_mii);
   2772 	else if ((sc->sc_type >= WM_T_82575)
   2773 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2774 		wm_serdes_tick(sc);
   2775 	else
   2776 		wm_tbi_tick(sc);
   2777 
   2778 out:
   2779 	WM_CORE_UNLOCK(sc);
   2780 #ifndef WM_MPSAFE
   2781 	splx(s);
   2782 #endif
   2783 
   2784 	if (!sc->sc_stopping)
   2785 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2786 }
   2787 
   2788 static int
   2789 wm_ifflags_cb(struct ethercom *ec)
   2790 {
   2791 	struct ifnet *ifp = &ec->ec_if;
   2792 	struct wm_softc *sc = ifp->if_softc;
   2793 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2794 	int rc = 0;
   2795 
   2796 	WM_CORE_LOCK(sc);
   2797 
   2798 	if (change != 0)
   2799 		sc->sc_if_flags = ifp->if_flags;
   2800 
   2801 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2802 		rc = ENETRESET;
   2803 		goto out;
   2804 	}
   2805 
   2806 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2807 		wm_set_filter(sc);
   2808 
   2809 	wm_set_vlan(sc);
   2810 
   2811 out:
   2812 	WM_CORE_UNLOCK(sc);
   2813 
   2814 	return rc;
   2815 }
   2816 
   2817 /*
   2818  * wm_ioctl:		[ifnet interface function]
   2819  *
   2820  *	Handle control requests from the operator.
   2821  */
   2822 static int
   2823 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2824 {
   2825 	struct wm_softc *sc = ifp->if_softc;
   2826 	struct ifreq *ifr = (struct ifreq *) data;
   2827 	struct ifaddr *ifa = (struct ifaddr *)data;
   2828 	struct sockaddr_dl *sdl;
   2829 	int s, error;
   2830 
   2831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2832 		device_xname(sc->sc_dev), __func__));
   2833 #ifndef WM_MPSAFE
   2834 	s = splnet();
   2835 #endif
   2836 	switch (cmd) {
   2837 	case SIOCSIFMEDIA:
   2838 	case SIOCGIFMEDIA:
   2839 		WM_CORE_LOCK(sc);
   2840 		/* Flow control requires full-duplex mode. */
   2841 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2842 		    (ifr->ifr_media & IFM_FDX) == 0)
   2843 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2844 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2845 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2846 				/* We can do both TXPAUSE and RXPAUSE. */
   2847 				ifr->ifr_media |=
   2848 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2849 			}
   2850 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2851 		}
   2852 		WM_CORE_UNLOCK(sc);
   2853 #ifdef WM_MPSAFE
   2854 		s = splnet();
   2855 #endif
   2856 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2857 #ifdef WM_MPSAFE
   2858 		splx(s);
   2859 #endif
   2860 		break;
   2861 	case SIOCINITIFADDR:
   2862 		WM_CORE_LOCK(sc);
   2863 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2864 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2865 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2866 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2867 			/* unicast address is first multicast entry */
   2868 			wm_set_filter(sc);
   2869 			error = 0;
   2870 			WM_CORE_UNLOCK(sc);
   2871 			break;
   2872 		}
   2873 		WM_CORE_UNLOCK(sc);
   2874 		/*FALLTHROUGH*/
   2875 	default:
   2876 #ifdef WM_MPSAFE
   2877 		s = splnet();
   2878 #endif
   2879 		/* It may call wm_start, so unlock here */
   2880 		error = ether_ioctl(ifp, cmd, data);
   2881 #ifdef WM_MPSAFE
   2882 		splx(s);
   2883 #endif
   2884 		if (error != ENETRESET)
   2885 			break;
   2886 
   2887 		error = 0;
   2888 
   2889 		if (cmd == SIOCSIFCAP) {
   2890 			error = (*ifp->if_init)(ifp);
   2891 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2892 			;
   2893 		else if (ifp->if_flags & IFF_RUNNING) {
   2894 			/*
   2895 			 * Multicast list has changed; set the hardware filter
   2896 			 * accordingly.
   2897 			 */
   2898 			WM_CORE_LOCK(sc);
   2899 			wm_set_filter(sc);
   2900 			WM_CORE_UNLOCK(sc);
   2901 		}
   2902 		break;
   2903 	}
   2904 
   2905 #ifndef WM_MPSAFE
   2906 	splx(s);
   2907 #endif
   2908 	return error;
   2909 }
   2910 
   2911 /* MAC address related */
   2912 
   2913 /*
    2914  * Get the offset of the alternative MAC address and return it.
    2915  * If an error occurs or no valid address is found, return 0.
   2916  */
   2917 static uint16_t
   2918 wm_check_alt_mac_addr(struct wm_softc *sc)
   2919 {
   2920 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2921 	uint16_t offset = NVM_OFF_MACADDR;
   2922 
   2923 	/* Try to read alternative MAC address pointer */
   2924 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2925 		return 0;
   2926 
    2927 	/* Check whether the pointer is valid or not. */
   2928 	if ((offset == 0x0000) || (offset == 0xffff))
   2929 		return 0;
   2930 
   2931 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2932 	/*
    2933 	 * Check whether the alternative MAC address is valid.
    2934 	 * Some cards have a non-0xffff pointer but don't actually
    2935 	 * use an alternative MAC address.
    2936 	 *
    2937 	 * A valid address must not have the group (multicast) bit set.
   2938 	 */
   2939 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2940 		if (((myea[0] & 0xff) & 0x01) == 0)
   2941 			return offset; /* Found */
   2942 
   2943 	/* Not found */
   2944 	return 0;
   2945 }
   2946 
   2947 static int
   2948 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2949 {
   2950 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2951 	uint16_t offset = NVM_OFF_MACADDR;
   2952 	int do_invert = 0;
   2953 
   2954 	switch (sc->sc_type) {
   2955 	case WM_T_82580:
   2956 	case WM_T_I350:
   2957 	case WM_T_I354:
   2958 		/* EEPROM Top Level Partitioning */
   2959 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2960 		break;
   2961 	case WM_T_82571:
   2962 	case WM_T_82575:
   2963 	case WM_T_82576:
   2964 	case WM_T_80003:
   2965 	case WM_T_I210:
   2966 	case WM_T_I211:
   2967 		offset = wm_check_alt_mac_addr(sc);
   2968 		if (offset == 0)
   2969 			if ((sc->sc_funcid & 0x01) == 1)
   2970 				do_invert = 1;
   2971 		break;
   2972 	default:
   2973 		if ((sc->sc_funcid & 0x01) == 1)
   2974 			do_invert = 1;
   2975 		break;
   2976 	}
   2977 
   2978 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2979 		myea) != 0)
   2980 		goto bad;
   2981 
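         	/*
         	 * The NVM stores the station address as three little-endian
         	 * 16-bit words; e.g. the words { 0x1100, 0x3322, 0x5544 }
         	 * decode to 00:11:22:33:44:55 below.
         	 */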
   2982 	enaddr[0] = myea[0] & 0xff;
   2983 	enaddr[1] = myea[0] >> 8;
   2984 	enaddr[2] = myea[1] & 0xff;
   2985 	enaddr[3] = myea[1] >> 8;
   2986 	enaddr[4] = myea[2] & 0xff;
   2987 	enaddr[5] = myea[2] >> 8;
   2988 
   2989 	/*
   2990 	 * Toggle the LSB of the MAC address on the second port
   2991 	 * of some dual port cards.
   2992 	 */
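         	/* E.g. function 1 with 00:11:22:33:44:54 becomes ...:44:55. */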
   2993 	if (do_invert != 0)
   2994 		enaddr[5] ^= 1;
   2995 
   2996 	return 0;
   2997 
   2998  bad:
   2999 	return -1;
   3000 }
   3001 
   3002 /*
   3003  * wm_set_ral:
   3004  *
    3005  *	Set an entry in the receive address list.
   3006  */
   3007 static void
   3008 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3009 {
   3010 	uint32_t ral_lo, ral_hi;
   3011 
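         	/*
         	 * The address is packed little-endian: e.g. 00:11:22:33:44:55
         	 * yields RAL_LO 0x33221100 and RAL_HI 0x00005544, with RAL_AV
         	 * marking the entry valid.
         	 */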
   3012 	if (enaddr != NULL) {
   3013 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3014 		    (enaddr[3] << 24);
   3015 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3016 		ral_hi |= RAL_AV;
   3017 	} else {
   3018 		ral_lo = 0;
   3019 		ral_hi = 0;
   3020 	}
   3021 
   3022 	if (sc->sc_type >= WM_T_82544) {
   3023 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3024 		    ral_lo);
   3025 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3026 		    ral_hi);
   3027 	} else {
   3028 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3029 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3030 	}
   3031 }
   3032 
   3033 /*
   3034  * wm_mchash:
   3035  *
   3036  *	Compute the hash of the multicast address for the 4096-bit
   3037  *	multicast filter.
   3038  */
   3039 static uint32_t
   3040 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3041 {
   3042 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3043 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3044 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3045 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3046 	uint32_t hash;
   3047 
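         	/*
         	 * The hash is formed from the last two octets of the address:
         	 * e.g. with mchash_type 0 on non-ICH parts it is
         	 * (enaddr[4] >> 4) | (enaddr[5] << 4), a 12-bit index into
         	 * the 4096-bit filter (10 bits / 1024 bits on ICH/PCH parts).
         	 */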
   3048 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3049 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3050 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3051 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3052 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3053 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3054 		return (hash & 0x3ff);
   3055 	}
   3056 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3057 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3058 
   3059 	return (hash & 0xfff);
   3060 }
   3061 
   3062 /*
   3063  * wm_set_filter:
   3064  *
   3065  *	Set up the receive filter.
   3066  */
   3067 static void
   3068 wm_set_filter(struct wm_softc *sc)
   3069 {
   3070 	struct ethercom *ec = &sc->sc_ethercom;
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	struct ether_multi *enm;
   3073 	struct ether_multistep step;
   3074 	bus_addr_t mta_reg;
   3075 	uint32_t hash, reg, bit;
   3076 	int i, size, ralmax;
   3077 
   3078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3079 		device_xname(sc->sc_dev), __func__));
   3080 	if (sc->sc_type >= WM_T_82544)
   3081 		mta_reg = WMREG_CORDOVA_MTA;
   3082 	else
   3083 		mta_reg = WMREG_MTA;
   3084 
   3085 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3086 
   3087 	if (ifp->if_flags & IFF_BROADCAST)
   3088 		sc->sc_rctl |= RCTL_BAM;
   3089 	if (ifp->if_flags & IFF_PROMISC) {
   3090 		sc->sc_rctl |= RCTL_UPE;
   3091 		goto allmulti;
   3092 	}
   3093 
   3094 	/*
   3095 	 * Set the station address in the first RAL slot, and
   3096 	 * clear the remaining slots.
   3097 	 */
   3098 	if (sc->sc_type == WM_T_ICH8)
    3099 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3100 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3101 	    || (sc->sc_type == WM_T_PCH))
   3102 		size = WM_RAL_TABSIZE_ICH8;
   3103 	else if (sc->sc_type == WM_T_PCH2)
   3104 		size = WM_RAL_TABSIZE_PCH2;
    3105 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3106 		size = WM_RAL_TABSIZE_PCH_LPT;
   3107 	else if (sc->sc_type == WM_T_82575)
   3108 		size = WM_RAL_TABSIZE_82575;
   3109 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3110 		size = WM_RAL_TABSIZE_82576;
   3111 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3112 		size = WM_RAL_TABSIZE_I350;
   3113 	else
   3114 		size = WM_RAL_TABSIZE;
   3115 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3116 
   3117 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3118 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3119 		switch (i) {
   3120 		case 0:
   3121 			/* We can use all entries */
   3122 			ralmax = size;
   3123 			break;
   3124 		case 1:
   3125 			/* Only RAR[0] */
   3126 			ralmax = 1;
   3127 			break;
   3128 		default:
   3129 			/* available SHRA + RAR[0] */
   3130 			ralmax = i + 1;
   3131 		}
   3132 	} else
   3133 		ralmax = size;
   3134 	for (i = 1; i < size; i++) {
   3135 		if (i < ralmax)
   3136 			wm_set_ral(sc, NULL, i);
   3137 	}
   3138 
   3139 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3140 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3141 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3142 	    || (sc->sc_type == WM_T_PCH_SPT))
   3143 		size = WM_ICH8_MC_TABSIZE;
   3144 	else
   3145 		size = WM_MC_TABSIZE;
   3146 	/* Clear out the multicast table. */
   3147 	for (i = 0; i < size; i++)
   3148 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3149 
   3150 	ETHER_FIRST_MULTI(step, ec, enm);
   3151 	while (enm != NULL) {
   3152 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3153 			/*
   3154 			 * We must listen to a range of multicast addresses.
   3155 			 * For now, just accept all multicasts, rather than
   3156 			 * trying to set only those filter bits needed to match
   3157 			 * the range.  (At this time, the only use of address
   3158 			 * ranges is for IP multicast routing, for which the
   3159 			 * range is big enough to require all bits set.)
   3160 			 */
   3161 			goto allmulti;
   3162 		}
   3163 
   3164 		hash = wm_mchash(sc, enm->enm_addrlo);
   3165 
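         		/*
         		 * The upper bits of the hash select one of the 32-bit
         		 * MTA words; the low 5 bits select a bit within it.
         		 */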
   3166 		reg = (hash >> 5);
   3167 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3168 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3169 		    || (sc->sc_type == WM_T_PCH2)
   3170 		    || (sc->sc_type == WM_T_PCH_LPT)
   3171 		    || (sc->sc_type == WM_T_PCH_SPT))
   3172 			reg &= 0x1f;
   3173 		else
   3174 			reg &= 0x7f;
   3175 		bit = hash & 0x1f;
   3176 
   3177 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3178 		hash |= 1U << bit;
   3179 
   3180 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3181 			/*
    3182 			 * 82544 Errata 9: Certain registers cannot be written
   3183 			 * with particular alignments in PCI-X bus operation
   3184 			 * (FCAH, MTA and VFTA).
   3185 			 */
   3186 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3187 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3188 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3189 		} else
   3190 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3191 
   3192 		ETHER_NEXT_MULTI(step, enm);
   3193 	}
   3194 
   3195 	ifp->if_flags &= ~IFF_ALLMULTI;
   3196 	goto setit;
   3197 
   3198  allmulti:
   3199 	ifp->if_flags |= IFF_ALLMULTI;
   3200 	sc->sc_rctl |= RCTL_MPE;
   3201 
   3202  setit:
   3203 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3204 }
   3205 
   3206 /* Reset and init related */
   3207 
   3208 static void
   3209 wm_set_vlan(struct wm_softc *sc)
   3210 {
   3211 
   3212 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3213 		device_xname(sc->sc_dev), __func__));
   3214 	/* Deal with VLAN enables. */
   3215 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3216 		sc->sc_ctrl |= CTRL_VME;
   3217 	else
   3218 		sc->sc_ctrl &= ~CTRL_VME;
   3219 
   3220 	/* Write the control registers. */
   3221 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3222 }
   3223 
   3224 static void
   3225 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3226 {
   3227 	uint32_t gcr;
   3228 	pcireg_t ctrl2;
   3229 
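         	/*
         	 * If the timeout is still at its default of 0, set a finite
         	 * value: 10ms via GCR on pre-v2 capability parts, or 16ms
         	 * via the PCIe DCSR2 register otherwise.
         	 */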
   3230 	gcr = CSR_READ(sc, WMREG_GCR);
   3231 
   3232 	/* Only take action if timeout value is defaulted to 0 */
   3233 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3234 		goto out;
   3235 
   3236 	if ((gcr & GCR_CAP_VER2) == 0) {
   3237 		gcr |= GCR_CMPL_TMOUT_10MS;
   3238 		goto out;
   3239 	}
   3240 
   3241 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3242 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3243 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3244 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3245 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3246 
   3247 out:
   3248 	/* Disable completion timeout resend */
   3249 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3250 
   3251 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3252 }
   3253 
   3254 void
   3255 wm_get_auto_rd_done(struct wm_softc *sc)
   3256 {
   3257 	int i;
   3258 
   3259 	/* wait for eeprom to reload */
   3260 	switch (sc->sc_type) {
   3261 	case WM_T_82571:
   3262 	case WM_T_82572:
   3263 	case WM_T_82573:
   3264 	case WM_T_82574:
   3265 	case WM_T_82583:
   3266 	case WM_T_82575:
   3267 	case WM_T_82576:
   3268 	case WM_T_82580:
   3269 	case WM_T_I350:
   3270 	case WM_T_I354:
   3271 	case WM_T_I210:
   3272 	case WM_T_I211:
   3273 	case WM_T_80003:
   3274 	case WM_T_ICH8:
   3275 	case WM_T_ICH9:
   3276 		for (i = 0; i < 10; i++) {
   3277 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3278 				break;
   3279 			delay(1000);
   3280 		}
   3281 		if (i == 10) {
   3282 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3283 			    "complete\n", device_xname(sc->sc_dev));
   3284 		}
   3285 		break;
   3286 	default:
   3287 		break;
   3288 	}
   3289 }
   3290 
   3291 void
   3292 wm_lan_init_done(struct wm_softc *sc)
   3293 {
   3294 	uint32_t reg = 0;
   3295 	int i;
   3296 
    3297 	/* Wait for the NVM reload to raise LAN_INIT_DONE */
   3298 	switch (sc->sc_type) {
   3299 	case WM_T_ICH10:
   3300 	case WM_T_PCH:
   3301 	case WM_T_PCH2:
   3302 	case WM_T_PCH_LPT:
   3303 	case WM_T_PCH_SPT:
   3304 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3305 			reg = CSR_READ(sc, WMREG_STATUS);
   3306 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3307 				break;
   3308 			delay(100);
   3309 		}
   3310 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3311 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3312 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3313 		}
   3314 		break;
   3315 	default:
   3316 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3317 		    __func__);
   3318 		break;
   3319 	}
   3320 
   3321 	reg &= ~STATUS_LAN_INIT_DONE;
   3322 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3323 }
   3324 
   3325 void
   3326 wm_get_cfg_done(struct wm_softc *sc)
   3327 {
   3328 	int mask;
   3329 	uint32_t reg;
   3330 	int i;
   3331 
    3332 	/* Wait for the post-reset configuration to complete */
   3333 	switch (sc->sc_type) {
   3334 	case WM_T_82542_2_0:
   3335 	case WM_T_82542_2_1:
   3336 		/* null */
   3337 		break;
   3338 	case WM_T_82543:
   3339 	case WM_T_82544:
   3340 	case WM_T_82540:
   3341 	case WM_T_82545:
   3342 	case WM_T_82545_3:
   3343 	case WM_T_82546:
   3344 	case WM_T_82546_3:
   3345 	case WM_T_82541:
   3346 	case WM_T_82541_2:
   3347 	case WM_T_82547:
   3348 	case WM_T_82547_2:
   3349 	case WM_T_82573:
   3350 	case WM_T_82574:
   3351 	case WM_T_82583:
   3352 		/* generic */
   3353 		delay(10*1000);
   3354 		break;
   3355 	case WM_T_80003:
   3356 	case WM_T_82571:
   3357 	case WM_T_82572:
   3358 	case WM_T_82575:
   3359 	case WM_T_82576:
   3360 	case WM_T_82580:
   3361 	case WM_T_I350:
   3362 	case WM_T_I354:
   3363 	case WM_T_I210:
   3364 	case WM_T_I211:
   3365 		if (sc->sc_type == WM_T_82571) {
    3366 			/* Only on 82571: all ports share CFGDONE_0 */
   3367 			mask = EEMNGCTL_CFGDONE_0;
   3368 		} else
   3369 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3370 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3371 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3372 				break;
   3373 			delay(1000);
   3374 		}
   3375 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3376 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3377 				device_xname(sc->sc_dev), __func__));
   3378 		}
   3379 		break;
   3380 	case WM_T_ICH8:
   3381 	case WM_T_ICH9:
   3382 	case WM_T_ICH10:
   3383 	case WM_T_PCH:
   3384 	case WM_T_PCH2:
   3385 	case WM_T_PCH_LPT:
   3386 	case WM_T_PCH_SPT:
   3387 		delay(10*1000);
   3388 		if (sc->sc_type >= WM_T_ICH10)
   3389 			wm_lan_init_done(sc);
   3390 		else
   3391 			wm_get_auto_rd_done(sc);
   3392 
   3393 		reg = CSR_READ(sc, WMREG_STATUS);
   3394 		if ((reg & STATUS_PHYRA) != 0)
   3395 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3396 		break;
   3397 	default:
   3398 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3399 		    __func__);
   3400 		break;
   3401 	}
   3402 }
   3403 
   3404 /* Init hardware bits */
   3405 void
   3406 wm_initialize_hardware_bits(struct wm_softc *sc)
   3407 {
   3408 	uint32_t tarc0, tarc1, reg;
   3409 
   3410 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3411 		device_xname(sc->sc_dev), __func__));
    3412 	/* For 82571 variants, 80003 and ICHs */
   3413 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3414 	    || (sc->sc_type >= WM_T_80003)) {
   3415 
   3416 		/* Transmit Descriptor Control 0 */
   3417 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3418 		reg |= TXDCTL_COUNT_DESC;
   3419 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3420 
   3421 		/* Transmit Descriptor Control 1 */
   3422 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3423 		reg |= TXDCTL_COUNT_DESC;
   3424 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3425 
   3426 		/* TARC0 */
   3427 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3428 		switch (sc->sc_type) {
   3429 		case WM_T_82571:
   3430 		case WM_T_82572:
   3431 		case WM_T_82573:
   3432 		case WM_T_82574:
   3433 		case WM_T_82583:
   3434 		case WM_T_80003:
   3435 			/* Clear bits 30..27 */
   3436 			tarc0 &= ~__BITS(30, 27);
   3437 			break;
   3438 		default:
   3439 			break;
   3440 		}
   3441 
   3442 		switch (sc->sc_type) {
   3443 		case WM_T_82571:
   3444 		case WM_T_82572:
   3445 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3446 
   3447 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3448 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3449 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3450 			/* 8257[12] Errata No.7 */
    3451 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3452 
   3453 			/* TARC1 bit 28 */
   3454 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3455 				tarc1 &= ~__BIT(28);
   3456 			else
   3457 				tarc1 |= __BIT(28);
   3458 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3459 
   3460 			/*
   3461 			 * 8257[12] Errata No.13
    3462 			 * Disable Dynamic Clock Gating.
   3463 			 */
   3464 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3465 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3467 			break;
   3468 		case WM_T_82573:
   3469 		case WM_T_82574:
   3470 		case WM_T_82583:
   3471 			if ((sc->sc_type == WM_T_82574)
   3472 			    || (sc->sc_type == WM_T_82583))
   3473 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3474 
   3475 			/* Extended Device Control */
   3476 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3477 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3478 			reg |= __BIT(22);	/* Set bit 22 */
   3479 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3480 
   3481 			/* Device Control */
   3482 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3483 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3484 
   3485 			/* PCIe Control Register */
   3486 			/*
   3487 			 * 82573 Errata (unknown).
   3488 			 *
   3489 			 * 82574 Errata 25 and 82583 Errata 12
   3490 			 * "Dropped Rx Packets":
    3491 			 *   NVM image version 2.1.4 and newer doesn't have this bug.
   3492 			 */
   3493 			reg = CSR_READ(sc, WMREG_GCR);
   3494 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3495 			CSR_WRITE(sc, WMREG_GCR, reg);
   3496 
   3497 			if ((sc->sc_type == WM_T_82574)
   3498 			    || (sc->sc_type == WM_T_82583)) {
   3499 				/*
   3500 				 * Document says this bit must be set for
   3501 				 * proper operation.
   3502 				 */
   3503 				reg = CSR_READ(sc, WMREG_GCR);
   3504 				reg |= __BIT(22);
   3505 				CSR_WRITE(sc, WMREG_GCR, reg);
   3506 
   3507 				/*
    3508 				 * Apply a workaround for the hardware
    3509 				 * erratum documented in the errata docs.
    3510 				 * It fixes an issue where error-prone or
    3511 				 * unreliable PCIe completions occur,
    3512 				 * particularly with ASPM enabled. Without
    3513 				 * the fix, the issue can cause Tx timeouts.
   3514 				 */
   3515 				reg = CSR_READ(sc, WMREG_GCR2);
   3516 				reg |= __BIT(0);
   3517 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3518 			}
   3519 			break;
   3520 		case WM_T_80003:
   3521 			/* TARC0 */
   3522 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3523 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3524 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3525 
   3526 			/* TARC1 bit 28 */
   3527 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3528 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3529 				tarc1 &= ~__BIT(28);
   3530 			else
   3531 				tarc1 |= __BIT(28);
   3532 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3533 			break;
   3534 		case WM_T_ICH8:
   3535 		case WM_T_ICH9:
   3536 		case WM_T_ICH10:
   3537 		case WM_T_PCH:
   3538 		case WM_T_PCH2:
   3539 		case WM_T_PCH_LPT:
   3540 		case WM_T_PCH_SPT:
   3541 			/* TARC0 */
   3542 			if ((sc->sc_type == WM_T_ICH8)
   3543 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3544 				/* Set TARC0 bits 29 and 28 */
   3545 				tarc0 |= __BITS(29, 28);
   3546 			}
    3547 			/* Set TARC0 bits 23, 24, 26 and 27 */
   3548 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3549 
   3550 			/* CTRL_EXT */
   3551 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3552 			reg |= __BIT(22);	/* Set bit 22 */
   3553 			/*
   3554 			 * Enable PHY low-power state when MAC is at D3
   3555 			 * w/o WoL
   3556 			 */
   3557 			if (sc->sc_type >= WM_T_PCH)
   3558 				reg |= CTRL_EXT_PHYPDEN;
   3559 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3560 
   3561 			/* TARC1 */
   3562 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3563 			/* bit 28 */
   3564 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3565 				tarc1 &= ~__BIT(28);
   3566 			else
   3567 				tarc1 |= __BIT(28);
   3568 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3569 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3570 
   3571 			/* Device Status */
   3572 			if (sc->sc_type == WM_T_ICH8) {
   3573 				reg = CSR_READ(sc, WMREG_STATUS);
   3574 				reg &= ~__BIT(31);
   3575 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3576 
   3577 			}
   3578 
   3579 			/* IOSFPC */
   3580 			if (sc->sc_type == WM_T_PCH_SPT) {
   3581 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3582 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3583 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3584 			}
   3585 			/*
    3586 			 * To work around a descriptor data corruption issue
    3587 			 * with NFSv2 UDP traffic, just disable the NFS
    3588 			 * filtering capability.
   3589 			 */
   3590 			reg = CSR_READ(sc, WMREG_RFCTL);
   3591 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3592 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3593 			break;
   3594 		default:
   3595 			break;
   3596 		}
   3597 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3598 
   3599 		/*
   3600 		 * 8257[12] Errata No.52 and some others.
   3601 		 * Avoid RSS Hash Value bug.
   3602 		 */
   3603 		switch (sc->sc_type) {
   3604 		case WM_T_82571:
   3605 		case WM_T_82572:
   3606 		case WM_T_82573:
   3607 		case WM_T_80003:
   3608 		case WM_T_ICH8:
   3609 			reg = CSR_READ(sc, WMREG_RFCTL);
    3610 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3611 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3612 			break;
   3613 		default:
   3614 			break;
   3615 		}
   3616 	}
   3617 }
   3618 
   3619 static uint32_t
   3620 wm_rxpbs_adjust_82580(uint32_t val)
   3621 {
   3622 	uint32_t rv = 0;
   3623 
   3624 	if (val < __arraycount(wm_82580_rxpbs_table))
   3625 		rv = wm_82580_rxpbs_table[val];
   3626 
   3627 	return rv;
   3628 }
   3629 
   3630 /*
   3631  * wm_reset:
   3632  *
   3633  *	Reset the i82542 chip.
   3634  */
   3635 static void
   3636 wm_reset(struct wm_softc *sc)
   3637 {
   3638 	int phy_reset = 0;
   3639 	int i, error = 0;
   3640 	uint32_t reg, mask;
   3641 
   3642 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3643 		device_xname(sc->sc_dev), __func__));
   3644 	/*
   3645 	 * Allocate on-chip memory according to the MTU size.
   3646 	 * The Packet Buffer Allocation register must be written
   3647 	 * before the chip is reset.
   3648 	 */
   3649 	switch (sc->sc_type) {
   3650 	case WM_T_82547:
   3651 	case WM_T_82547_2:
   3652 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3653 		    PBA_22K : PBA_30K;
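         		/*
         		 * The rest of the 40KB packet buffer above sc_pba is
         		 * used as the Tx FIFO, whose state is tracked in the
         		 * txq_fifo_* fields below.
         		 */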
   3654 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3655 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3656 			txq->txq_fifo_head = 0;
   3657 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3658 			txq->txq_fifo_size =
   3659 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3660 			txq->txq_fifo_stall = 0;
   3661 		}
   3662 		break;
   3663 	case WM_T_82571:
   3664 	case WM_T_82572:
    3665 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3666 	case WM_T_80003:
   3667 		sc->sc_pba = PBA_32K;
   3668 		break;
   3669 	case WM_T_82573:
   3670 		sc->sc_pba = PBA_12K;
   3671 		break;
   3672 	case WM_T_82574:
   3673 	case WM_T_82583:
   3674 		sc->sc_pba = PBA_20K;
   3675 		break;
   3676 	case WM_T_82576:
   3677 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3678 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3679 		break;
   3680 	case WM_T_82580:
   3681 	case WM_T_I350:
   3682 	case WM_T_I354:
   3683 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3684 		break;
   3685 	case WM_T_I210:
   3686 	case WM_T_I211:
   3687 		sc->sc_pba = PBA_34K;
   3688 		break;
   3689 	case WM_T_ICH8:
   3690 		/* Workaround for a bit corruption issue in FIFO memory */
   3691 		sc->sc_pba = PBA_8K;
   3692 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3693 		break;
   3694 	case WM_T_ICH9:
   3695 	case WM_T_ICH10:
   3696 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3697 		    PBA_14K : PBA_10K;
   3698 		break;
   3699 	case WM_T_PCH:
   3700 	case WM_T_PCH2:
   3701 	case WM_T_PCH_LPT:
   3702 	case WM_T_PCH_SPT:
   3703 		sc->sc_pba = PBA_26K;
   3704 		break;
   3705 	default:
   3706 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3707 		    PBA_40K : PBA_48K;
   3708 		break;
   3709 	}
   3710 	/*
    3711 	 * Only old or non-multiqueue devices have the PBA register.
   3712 	 * XXX Need special handling for 82575.
   3713 	 */
   3714 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3715 	    || (sc->sc_type == WM_T_82575))
   3716 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3717 
   3718 	/* Prevent the PCI-E bus from sticking */
   3719 	if (sc->sc_flags & WM_F_PCIE) {
   3720 		int timeout = 800;
   3721 
   3722 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3724 
   3725 		while (timeout--) {
   3726 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3727 			    == 0)
   3728 				break;
   3729 			delay(100);
   3730 		}
   3731 	}
   3732 
   3733 	/* Set the completion timeout for interface */
   3734 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3735 	    || (sc->sc_type == WM_T_82580)
   3736 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3737 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3738 		wm_set_pcie_completion_timeout(sc);
   3739 
   3740 	/* Clear interrupt */
   3741 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3742 	if (sc->sc_nintrs > 1) {
   3743 		if (sc->sc_type != WM_T_82574) {
   3744 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3745 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3746 		} else {
   3747 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3748 		}
   3749 	}
   3750 
   3751 	/* Stop the transmit and receive processes. */
   3752 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3753 	sc->sc_rctl &= ~RCTL_EN;
   3754 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3755 	CSR_WRITE_FLUSH(sc);
   3756 
   3757 	/* XXX set_tbi_sbp_82543() */
   3758 
   3759 	delay(10*1000);
   3760 
   3761 	/* Must acquire the MDIO ownership before MAC reset */
   3762 	switch (sc->sc_type) {
   3763 	case WM_T_82573:
   3764 	case WM_T_82574:
   3765 	case WM_T_82583:
   3766 		error = wm_get_hw_semaphore_82573(sc);
   3767 		break;
   3768 	default:
   3769 		break;
   3770 	}
   3771 
   3772 	/*
   3773 	 * 82541 Errata 29? & 82547 Errata 28?
   3774 	 * See also the description about PHY_RST bit in CTRL register
   3775 	 * in 8254x_GBe_SDM.pdf.
   3776 	 */
   3777 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3778 		CSR_WRITE(sc, WMREG_CTRL,
   3779 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3780 		CSR_WRITE_FLUSH(sc);
   3781 		delay(5000);
   3782 	}
   3783 
   3784 	switch (sc->sc_type) {
   3785 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3786 	case WM_T_82541:
   3787 	case WM_T_82541_2:
   3788 	case WM_T_82547:
   3789 	case WM_T_82547_2:
   3790 		/*
   3791 		 * On some chipsets, a reset through a memory-mapped write
   3792 		 * cycle can cause the chip to reset before completing the
   3793 		 * write cycle.  This causes major headache that can be
   3794 		 * avoided by issuing the reset via indirect register writes
   3795 		 * through I/O space.
   3796 		 *
   3797 		 * So, if we successfully mapped the I/O BAR at attach time,
   3798 		 * use that.  Otherwise, try our luck with a memory-mapped
   3799 		 * reset.
   3800 		 */
   3801 		if (sc->sc_flags & WM_F_IOH_VALID)
   3802 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3803 		else
   3804 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3805 		break;
   3806 	case WM_T_82545_3:
   3807 	case WM_T_82546_3:
   3808 		/* Use the shadow control register on these chips. */
   3809 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3810 		break;
   3811 	case WM_T_80003:
   3812 		mask = swfwphysem[sc->sc_funcid];
   3813 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3814 		wm_get_swfw_semaphore(sc, mask);
   3815 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3816 		wm_put_swfw_semaphore(sc, mask);
   3817 		break;
   3818 	case WM_T_ICH8:
   3819 	case WM_T_ICH9:
   3820 	case WM_T_ICH10:
   3821 	case WM_T_PCH:
   3822 	case WM_T_PCH2:
   3823 	case WM_T_PCH_LPT:
   3824 	case WM_T_PCH_SPT:
   3825 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3826 		if (wm_phy_resetisblocked(sc) == false) {
   3827 			/*
   3828 			 * Gate automatic PHY configuration by hardware on
   3829 			 * non-managed 82579
   3830 			 */
   3831 			if ((sc->sc_type == WM_T_PCH2)
   3832 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3833 				== 0))
   3834 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3835 
   3836 			reg |= CTRL_PHY_RESET;
   3837 			phy_reset = 1;
   3838 		}
   3839 		wm_get_swfwhw_semaphore(sc);
   3840 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3841 		/* Don't insert a completion barrier during reset */
   3842 		delay(20*1000);
   3843 		wm_put_swfwhw_semaphore(sc);
   3844 		break;
   3845 	case WM_T_82580:
   3846 	case WM_T_I350:
   3847 	case WM_T_I354:
   3848 	case WM_T_I210:
   3849 	case WM_T_I211:
   3850 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3851 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3852 			CSR_WRITE_FLUSH(sc);
   3853 		delay(5000);
   3854 		break;
   3855 	case WM_T_82542_2_0:
   3856 	case WM_T_82542_2_1:
   3857 	case WM_T_82543:
   3858 	case WM_T_82540:
   3859 	case WM_T_82545:
   3860 	case WM_T_82546:
   3861 	case WM_T_82571:
   3862 	case WM_T_82572:
   3863 	case WM_T_82573:
   3864 	case WM_T_82574:
   3865 	case WM_T_82575:
   3866 	case WM_T_82576:
   3867 	case WM_T_82583:
   3868 	default:
   3869 		/* Everything else can safely use the documented method. */
   3870 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3871 		break;
   3872 	}
   3873 
   3874 	/* Must release the MDIO ownership after MAC reset */
   3875 	switch (sc->sc_type) {
   3876 	case WM_T_82573:
   3877 	case WM_T_82574:
   3878 	case WM_T_82583:
   3879 		if (error == 0)
   3880 			wm_put_hw_semaphore_82573(sc);
   3881 		break;
   3882 	default:
   3883 		break;
   3884 	}
   3885 
   3886 	if (phy_reset != 0)
   3887 		wm_get_cfg_done(sc);
   3888 
   3889 	/* reload EEPROM */
   3890 	switch (sc->sc_type) {
   3891 	case WM_T_82542_2_0:
   3892 	case WM_T_82542_2_1:
   3893 	case WM_T_82543:
   3894 	case WM_T_82544:
   3895 		delay(10);
   3896 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3897 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3898 		CSR_WRITE_FLUSH(sc);
   3899 		delay(2000);
   3900 		break;
   3901 	case WM_T_82540:
   3902 	case WM_T_82545:
   3903 	case WM_T_82545_3:
   3904 	case WM_T_82546:
   3905 	case WM_T_82546_3:
   3906 		delay(5*1000);
   3907 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3908 		break;
   3909 	case WM_T_82541:
   3910 	case WM_T_82541_2:
   3911 	case WM_T_82547:
   3912 	case WM_T_82547_2:
   3913 		delay(20000);
   3914 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3915 		break;
   3916 	case WM_T_82571:
   3917 	case WM_T_82572:
   3918 	case WM_T_82573:
   3919 	case WM_T_82574:
   3920 	case WM_T_82583:
   3921 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3922 			delay(10);
   3923 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3924 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3925 			CSR_WRITE_FLUSH(sc);
   3926 		}
   3927 		/* check EECD_EE_AUTORD */
   3928 		wm_get_auto_rd_done(sc);
   3929 		/*
    3930 		 * PHY configuration from the NVM starts just after
    3931 		 * EECD_AUTO_RD is set.
   3932 		 */
   3933 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3934 		    || (sc->sc_type == WM_T_82583))
   3935 			delay(25*1000);
   3936 		break;
   3937 	case WM_T_82575:
   3938 	case WM_T_82576:
   3939 	case WM_T_82580:
   3940 	case WM_T_I350:
   3941 	case WM_T_I354:
   3942 	case WM_T_I210:
   3943 	case WM_T_I211:
   3944 	case WM_T_80003:
   3945 		/* check EECD_EE_AUTORD */
   3946 		wm_get_auto_rd_done(sc);
   3947 		break;
   3948 	case WM_T_ICH8:
   3949 	case WM_T_ICH9:
   3950 	case WM_T_ICH10:
   3951 	case WM_T_PCH:
   3952 	case WM_T_PCH2:
   3953 	case WM_T_PCH_LPT:
   3954 	case WM_T_PCH_SPT:
   3955 		break;
   3956 	default:
   3957 		panic("%s: unknown type\n", __func__);
   3958 	}
   3959 
   3960 	/* Check whether EEPROM is present or not */
   3961 	switch (sc->sc_type) {
   3962 	case WM_T_82575:
   3963 	case WM_T_82576:
   3964 	case WM_T_82580:
   3965 	case WM_T_I350:
   3966 	case WM_T_I354:
   3967 	case WM_T_ICH8:
   3968 	case WM_T_ICH9:
   3969 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3970 			/* Not found */
   3971 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3972 			if (sc->sc_type == WM_T_82575)
   3973 				wm_reset_init_script_82575(sc);
   3974 		}
   3975 		break;
   3976 	default:
   3977 		break;
   3978 	}
   3979 
   3980 	if ((sc->sc_type == WM_T_82580)
   3981 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3982 		/* clear global device reset status bit */
   3983 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3984 	}
   3985 
   3986 	/* Clear any pending interrupt events. */
   3987 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3988 	reg = CSR_READ(sc, WMREG_ICR);
   3989 	if (sc->sc_nintrs > 1) {
   3990 		if (sc->sc_type != WM_T_82574) {
   3991 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3992 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3993 		} else
   3994 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3995 	}
   3996 
   3997 	/* reload sc_ctrl */
   3998 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3999 
   4000 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4001 		wm_set_eee_i350(sc);
   4002 
   4003 	/* dummy read from WUC */
   4004 	if (sc->sc_type == WM_T_PCH)
   4005 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4006 	/*
   4007 	 * For PCH, this write will make sure that any noise will be detected
   4008 	 * as a CRC error and be dropped rather than show up as a bad packet
   4009 	 * to the DMA engine
   4010 	 */
   4011 	if (sc->sc_type == WM_T_PCH)
   4012 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4013 
   4014 	if (sc->sc_type >= WM_T_82544)
   4015 		CSR_WRITE(sc, WMREG_WUC, 0);
   4016 
   4017 	wm_reset_mdicnfg_82580(sc);
   4018 
   4019 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4020 		wm_pll_workaround_i210(sc);
   4021 }
   4022 
   4023 /*
   4024  * wm_add_rxbuf:
   4025  *
    4026  *	Add a receive buffer to the indicated descriptor.
   4027  */
   4028 static int
   4029 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4030 {
   4031 	struct wm_softc *sc = rxq->rxq_sc;
   4032 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4033 	struct mbuf *m;
   4034 	int error;
   4035 
   4036 	KASSERT(WM_RX_LOCKED(rxq));
   4037 
   4038 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4039 	if (m == NULL)
   4040 		return ENOBUFS;
   4041 
   4042 	MCLGET(m, M_DONTWAIT);
   4043 	if ((m->m_flags & M_EXT) == 0) {
   4044 		m_freem(m);
   4045 		return ENOBUFS;
   4046 	}
   4047 
   4048 	if (rxs->rxs_mbuf != NULL)
   4049 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4050 
   4051 	rxs->rxs_mbuf = m;
   4052 
   4053 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4054 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4055 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4056 	if (error) {
   4057 		/* XXX XXX XXX */
   4058 		aprint_error_dev(sc->sc_dev,
   4059 		    "unable to load rx DMA map %d, error = %d\n",
   4060 		    idx, error);
   4061 		panic("wm_add_rxbuf");
   4062 	}
   4063 
   4064 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4065 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4066 
   4067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4068 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4069 			wm_init_rxdesc(rxq, idx);
   4070 	} else
   4071 		wm_init_rxdesc(rxq, idx);
   4072 
   4073 	return 0;
   4074 }
   4075 
   4076 /*
   4077  * wm_rxdrain:
   4078  *
   4079  *	Drain the receive queue.
   4080  */
   4081 static void
   4082 wm_rxdrain(struct wm_rxqueue *rxq)
   4083 {
   4084 	struct wm_softc *sc = rxq->rxq_sc;
   4085 	struct wm_rxsoft *rxs;
   4086 	int i;
   4087 
   4088 	KASSERT(WM_RX_LOCKED(rxq));
   4089 
   4090 	for (i = 0; i < WM_NRXDESC; i++) {
   4091 		rxs = &rxq->rxq_soft[i];
   4092 		if (rxs->rxs_mbuf != NULL) {
   4093 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4094 			m_freem(rxs->rxs_mbuf);
   4095 			rxs->rxs_mbuf = NULL;
   4096 		}
   4097 	}
   4098 }
   4099 
   4100 
   4101 /*
   4102  * XXX copy from FreeBSD's sys/net/rss_config.c
   4103  */
   4104 /*
   4105  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4106  * effectiveness may be limited by algorithm choice and available entropy
   4107  * during the boot.
   4108  *
   4109  * XXXRW: And that we don't randomize it yet!
   4110  *
   4111  * This is the default Microsoft RSS specification key which is also
   4112  * the Chelsio T5 firmware default key.
   4113  */
   4114 #define RSS_KEYSIZE 40
   4115 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4116 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4117 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4118 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4119 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4120 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4121 };
   4122 
   4123 /*
   4124  * Caller must pass an array of size sizeof(rss_key).
   4125  *
   4126  * XXX
    4127  * As if_ixgbe may also use this function, it should not be
    4128  * an if_wm-specific function.
   4129  */
   4130 static void
   4131 wm_rss_getkey(uint8_t *key)
   4132 {
   4133 
   4134 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4135 }
   4136 
   4137 /*
    4138  * Set up registers for RSS.
    4139  *
    4140  * XXX VMDq is not supported yet.
   4141  */
   4142 static void
   4143 wm_init_rss(struct wm_softc *sc)
   4144 {
   4145 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4146 	int i;
   4147 
   4148 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4149 
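         	/*
         	 * Spread the RETA entries round-robin over the RX queues:
         	 * entry i maps to queue (i % sc_nrxqueues), so e.g. with two
         	 * queues the table alternates 0, 1, 0, 1, ...
         	 */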
   4150 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4151 		int qid, reta_ent;
   4152 
   4153 		qid  = i % sc->sc_nrxqueues;
    4154 		switch (sc->sc_type) {
   4155 		case WM_T_82574:
   4156 			reta_ent = __SHIFTIN(qid,
   4157 			    RETA_ENT_QINDEX_MASK_82574);
   4158 			break;
   4159 		case WM_T_82575:
   4160 			reta_ent = __SHIFTIN(qid,
   4161 			    RETA_ENT_QINDEX1_MASK_82575);
   4162 			break;
   4163 		default:
   4164 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4165 			break;
   4166 		}
   4167 
   4168 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4169 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4170 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4171 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4172 	}
   4173 
   4174 	wm_rss_getkey((uint8_t *)rss_key);
   4175 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4176 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4177 
   4178 	if (sc->sc_type == WM_T_82574)
   4179 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4180 	else
   4181 		mrqc = MRQC_ENABLE_RSS_MQ;
   4182 
    4183 	/* XXX
    4184 	 * The same as FreeBSD's igb.
    4185 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4186 	 */
   4187 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4188 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4189 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4190 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4191 
   4192 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4193 }
   4194 
   4195 /*
    4196  * Adjust the TX and RX queue numbers which the system actually uses.
    4197  *
    4198  * The numbers are affected by the parameters below:
    4199  *     - The number of hardware queues
   4200  *     - The number of MSI-X vectors (= "nvectors" argument)
   4201  *     - ncpu
   4202  */
   4203 static void
   4204 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4205 {
   4206 	int hw_ntxqueues, hw_nrxqueues;
   4207 
   4208 	if (nvectors < 3) {
   4209 		sc->sc_ntxqueues = 1;
   4210 		sc->sc_nrxqueues = 1;
   4211 		return;
   4212 	}
   4213 
    4214 	switch (sc->sc_type) {
   4215 	case WM_T_82572:
   4216 		hw_ntxqueues = 2;
   4217 		hw_nrxqueues = 2;
   4218 		break;
   4219 	case WM_T_82574:
   4220 		hw_ntxqueues = 2;
   4221 		hw_nrxqueues = 2;
   4222 		break;
   4223 	case WM_T_82575:
   4224 		hw_ntxqueues = 4;
   4225 		hw_nrxqueues = 4;
   4226 		break;
   4227 	case WM_T_82576:
   4228 		hw_ntxqueues = 16;
   4229 		hw_nrxqueues = 16;
   4230 		break;
   4231 	case WM_T_82580:
   4232 	case WM_T_I350:
   4233 	case WM_T_I354:
   4234 		hw_ntxqueues = 8;
   4235 		hw_nrxqueues = 8;
   4236 		break;
   4237 	case WM_T_I210:
   4238 		hw_ntxqueues = 4;
   4239 		hw_nrxqueues = 4;
   4240 		break;
   4241 	case WM_T_I211:
   4242 		hw_ntxqueues = 2;
   4243 		hw_nrxqueues = 2;
   4244 		break;
   4245 		/*
    4246 		 * As the Ethernet controllers below do not support MSI-X,
    4247 		 * this driver doesn't use multiqueue on them.
   4248 		 *     - WM_T_80003
   4249 		 *     - WM_T_ICH8
   4250 		 *     - WM_T_ICH9
   4251 		 *     - WM_T_ICH10
   4252 		 *     - WM_T_PCH
   4253 		 *     - WM_T_PCH2
   4254 		 *     - WM_T_PCH_LPT
   4255 		 */
   4256 	default:
   4257 		hw_ntxqueues = 1;
   4258 		hw_nrxqueues = 1;
   4259 		break;
   4260 	}
   4261 
   4262 	/*
    4263 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4264 	 * limit the number of queues actually used.
    4265 	 *
    4266 	 * XXX
    4267 	 * Currently, we use separate TX queue and RX queue interrupts.
    4268 	 * However, the number of MSI-X vectors on recent controllers (such
    4269 	 * as the I354) expects that drivers bundle a TX queue interrupt and
    4270 	 * an RX queue interrupt into one interrupt, e.g. FreeBSD's igb
    4271 	 * handles interrupts that way.
   4272 	 */
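         	/*
         	 * E.g. an 82576 (16 HW queues each way) given nvectors == 5
         	 * ends up with (5 - 1) / 2 == 2 TX queues and 2 RX queues.
         	 */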
   4273 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4274 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4275 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4276 	} else {
   4277 		sc->sc_ntxqueues = hw_ntxqueues;
   4278 		sc->sc_nrxqueues = hw_nrxqueues;
   4279 	}
   4280 
   4281 	/*
    4282 	 * As more queues than CPUs cannot improve scaling, we limit
    4283 	 * the number of queues actually used.
   4284 	 */
   4285 	if (ncpu < sc->sc_ntxqueues)
   4286 		sc->sc_ntxqueues = ncpu;
   4287 	if (ncpu < sc->sc_nrxqueues)
   4288 		sc->sc_nrxqueues = ncpu;
   4289 
   4290 	/* XXX Currently, this driver supports RX multiqueue only. */
   4291 	sc->sc_ntxqueues = 1;
   4292 }
   4293 
   4294 /*
   4295  * Both single interrupt MSI and INTx can use this function.
   4296  */
   4297 static int
   4298 wm_setup_legacy(struct wm_softc *sc)
   4299 {
   4300 	pci_chipset_tag_t pc = sc->sc_pc;
   4301 	const char *intrstr = NULL;
   4302 	char intrbuf[PCI_INTRSTR_LEN];
   4303 	int error;
   4304 
   4305 	error = wm_alloc_txrx_queues(sc);
   4306 	if (error) {
   4307 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4308 		    error);
   4309 		return ENOMEM;
   4310 	}
   4311 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4312 	    sizeof(intrbuf));
   4313 #ifdef WM_MPSAFE
   4314 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4315 #endif
   4316 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4317 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4318 	if (sc->sc_ihs[0] == NULL) {
   4319 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4320 		    (pci_intr_type(sc->sc_intrs[0])
   4321 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4322 		return ENOMEM;
   4323 	}
   4324 
   4325 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4326 	sc->sc_nintrs = 1;
   4327 	return 0;
   4328 }
   4329 
   4330 static int
   4331 wm_setup_msix(struct wm_softc *sc)
   4332 {
   4333 	void *vih;
   4334 	kcpuset_t *affinity;
   4335 	int qidx, error, intr_idx, tx_established, rx_established;
   4336 	pci_chipset_tag_t pc = sc->sc_pc;
   4337 	const char *intrstr = NULL;
   4338 	char intrbuf[PCI_INTRSTR_LEN];
   4339 	char intr_xname[INTRDEVNAMEBUF];
   4340 	/*
    4341 	 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4342 	 * interrupts starts from CPU#1.
   4343 	 */
   4344 	int affinity_offset = 1;
   4345 
   4346 	error = wm_alloc_txrx_queues(sc);
   4347 	if (error) {
   4348 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4349 		    error);
   4350 		return ENOMEM;
   4351 	}
   4352 
   4353 	kcpuset_create(&affinity, false);
   4354 	intr_idx = 0;
   4355 
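         	/*
         	 * Vector layout: the first ntxqueues vectors serve the TX
         	 * queues, the next nrxqueues vectors serve the RX queues,
         	 * and the last vector handles link state changes.
         	 */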
   4356 	/*
   4357 	 * TX
   4358 	 */
   4359 	tx_established = 0;
   4360 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4361 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4362 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4363 
   4364 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4365 		    sizeof(intrbuf));
   4366 #ifdef WM_MPSAFE
   4367 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4368 		    PCI_INTR_MPSAFE, true);
   4369 #endif
   4370 		memset(intr_xname, 0, sizeof(intr_xname));
   4371 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4372 		    device_xname(sc->sc_dev), qidx);
   4373 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4374 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4375 		if (vih == NULL) {
   4376 			aprint_error_dev(sc->sc_dev,
   4377 			    "unable to establish MSI-X(for TX)%s%s\n",
   4378 			    intrstr ? " at " : "",
   4379 			    intrstr ? intrstr : "");
   4380 
   4381 			goto fail_0;
   4382 		}
   4383 		kcpuset_zero(affinity);
   4384 		/* Round-robin affinity */
   4385 		kcpuset_set(affinity, affinity_to);
   4386 		error = interrupt_distribute(vih, affinity, NULL);
   4387 		if (error == 0) {
   4388 			aprint_normal_dev(sc->sc_dev,
   4389 			    "for TX interrupting at %s affinity to %u\n",
   4390 			    intrstr, affinity_to);
   4391 		} else {
   4392 			aprint_normal_dev(sc->sc_dev,
   4393 			    "for TX interrupting at %s\n", intrstr);
   4394 		}
   4395 		sc->sc_ihs[intr_idx] = vih;
   4396 		txq->txq_id = qidx;
   4397 		txq->txq_intr_idx = intr_idx;
   4398 
   4399 		tx_established++;
   4400 		intr_idx++;
   4401 	}
   4402 
   4403 	/*
   4404 	 * RX
   4405 	 */
   4406 	rx_established = 0;
   4407 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4408 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4409 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
   4410 
   4411 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4412 		    sizeof(intrbuf));
   4413 #ifdef WM_MPSAFE
   4414 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4415 		    PCI_INTR_MPSAFE, true);
   4416 #endif
   4417 		memset(intr_xname, 0, sizeof(intr_xname));
   4418 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4419 		    device_xname(sc->sc_dev), qidx);
   4420 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4421 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4422 		if (vih == NULL) {
   4423 			aprint_error_dev(sc->sc_dev,
   4424 			    "unable to establish MSI-X(for RX)%s%s\n",
   4425 			    intrstr ? " at " : "",
   4426 			    intrstr ? intrstr : "");
   4427 
   4428 			goto fail_1;
   4429 		}
   4430 		kcpuset_zero(affinity);
   4431 		/* Round-robin affinity */
   4432 		kcpuset_set(affinity, affinity_to);
   4433 		error = interrupt_distribute(vih, affinity, NULL);
   4434 		if (error == 0) {
   4435 			aprint_normal_dev(sc->sc_dev,
   4436 			    "for RX interrupting at %s affinity to %u\n",
   4437 			    intrstr, affinity_to);
   4438 		} else {
   4439 			aprint_normal_dev(sc->sc_dev,
   4440 			    "for RX interrupting at %s\n", intrstr);
   4441 		}
   4442 		sc->sc_ihs[intr_idx] = vih;
   4443 		rxq->rxq_id = qidx;
   4444 		rxq->rxq_intr_idx = intr_idx;
   4445 
   4446 		rx_established++;
   4447 		intr_idx++;
   4448 	}
   4449 
   4450 	/*
   4451 	 * LINK
   4452 	 */
   4453 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4454 	    sizeof(intrbuf));
   4455 #ifdef WM_MPSAFE
   4456 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4457 #endif
   4458 	memset(intr_xname, 0, sizeof(intr_xname));
   4459 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4460 	    device_xname(sc->sc_dev));
   4461 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4462 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4463 	if (vih == NULL) {
   4464 		aprint_error_dev(sc->sc_dev,
   4465 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4466 		    intrstr ? " at " : "",
   4467 		    intrstr ? intrstr : "");
   4468 
   4469 		goto fail_1;
   4470 	}
    4471 	/* Keep the default affinity for the LINK interrupt */
   4472 	aprint_normal_dev(sc->sc_dev,
   4473 	    "for LINK interrupting at %s\n", intrstr);
   4474 	sc->sc_ihs[intr_idx] = vih;
   4475 	sc->sc_link_intr_idx = intr_idx;
   4476 
   4477 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4478 	kcpuset_destroy(affinity);
   4479 	return 0;
   4480 
   4481  fail_1:
   4482 	for (qidx = 0; qidx < rx_established; qidx++) {
   4483 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4484 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
   4485 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4486 	}
   4487  fail_0:
   4488 	for (qidx = 0; qidx < tx_established; qidx++) {
   4489 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4490 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
   4491 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4492 	}
   4493 
   4494 	kcpuset_destroy(affinity);
   4495 	return ENOMEM;
   4496 }
   4497 
   4498 /*
   4499  * wm_init:		[ifnet interface function]
   4500  *
   4501  *	Initialize the interface.
   4502  */
   4503 static int
   4504 wm_init(struct ifnet *ifp)
   4505 {
   4506 	struct wm_softc *sc = ifp->if_softc;
   4507 	int ret;
   4508 
   4509 	WM_CORE_LOCK(sc);
   4510 	ret = wm_init_locked(ifp);
   4511 	WM_CORE_UNLOCK(sc);
   4512 
   4513 	return ret;
   4514 }
   4515 
   4516 static int
   4517 wm_init_locked(struct ifnet *ifp)
   4518 {
   4519 	struct wm_softc *sc = ifp->if_softc;
   4520 	int i, j, trynum, error = 0;
   4521 	uint32_t reg;
   4522 
   4523 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4524 		device_xname(sc->sc_dev), __func__));
   4525 	KASSERT(WM_CORE_LOCKED(sc));
   4526 	/*
    4527 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4528 	 * There is a small but measurable benefit to avoiding the adjustment
   4529 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4530 	 * on such platforms.  One possibility is that the DMA itself is
   4531 	 * slightly more efficient if the front of the entire packet (instead
   4532 	 * of the front of the headers) is aligned.
   4533 	 *
   4534 	 * Note we must always set align_tweak to 0 if we are using
   4535 	 * jumbo frames.
   4536 	 */
   4537 #ifdef __NO_STRICT_ALIGNMENT
   4538 	sc->sc_align_tweak = 0;
   4539 #else
   4540 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4541 		sc->sc_align_tweak = 0;
   4542 	else
   4543 		sc->sc_align_tweak = 2;
   4544 #endif /* __NO_STRICT_ALIGNMENT */
   4545 
   4546 	/* Cancel any pending I/O. */
   4547 	wm_stop_locked(ifp, 0);
   4548 
   4549 	/* update statistics before reset */
   4550 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4551 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4552 
   4553 	/* Reset the chip to a known state. */
   4554 	wm_reset(sc);
   4555 
   4556 	switch (sc->sc_type) {
   4557 	case WM_T_82571:
   4558 	case WM_T_82572:
   4559 	case WM_T_82573:
   4560 	case WM_T_82574:
   4561 	case WM_T_82583:
   4562 	case WM_T_80003:
   4563 	case WM_T_ICH8:
   4564 	case WM_T_ICH9:
   4565 	case WM_T_ICH10:
   4566 	case WM_T_PCH:
   4567 	case WM_T_PCH2:
   4568 	case WM_T_PCH_LPT:
   4569 	case WM_T_PCH_SPT:
   4570 		/* AMT based hardware can now take control from firmware */
   4571 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4572 			wm_get_hw_control(sc);
   4573 		break;
   4574 	default:
   4575 		break;
   4576 	}
   4577 
   4578 	/* Init hardware bits */
   4579 	wm_initialize_hardware_bits(sc);
   4580 
   4581 	/* Reset the PHY. */
   4582 	if (sc->sc_flags & WM_F_HAS_MII)
   4583 		wm_gmii_reset(sc);
   4584 
   4585 	/* Calculate (E)ITR value */
   4586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4587 		sc->sc_itr = 450;	/* For EITR */
   4588 	} else if (sc->sc_type >= WM_T_82543) {
   4589 		/*
   4590 		 * Set up the interrupt throttling register (units of 256ns)
   4591 		 * Note that a footnote in Intel's documentation says this
   4592 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4593 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4594 		 * that that is also true for the 1024ns units of the other
   4595 		 * interrupt-related timer registers -- so, really, we ought
   4596 		 * to divide this value by 4 when the link speed is low.
   4597 		 *
   4598 		 * XXX implement this division at link speed change!
   4599 		 */
   4600 
   4601 		/*
   4602 		 * For N interrupts/sec, set this value to:
   4603 		 * 1000000000 / (N * 256).  Note that we set the
   4604 		 * absolute and packet timer values to this value
   4605 		 * divided by 4 to get "simple timer" behavior.
   4606 		 */
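         		/* E.g. 1000000000 / (2604 * 256) =~ 1500, as used below. */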
   4607 
   4608 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4609 	}
   4610 
   4611 	error = wm_init_txrx_queues(sc);
   4612 	if (error)
   4613 		goto out;
   4614 
   4615 	/*
   4616 	 * Clear out the VLAN table -- we don't use it (yet).
   4617 	 */
   4618 	CSR_WRITE(sc, WMREG_VET, 0);
   4619 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4620 		trynum = 10; /* Due to hw errata */
   4621 	else
   4622 		trynum = 1;
   4623 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4624 		for (j = 0; j < trynum; j++)
   4625 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4626 
   4627 	/*
   4628 	 * Set up flow-control parameters.
   4629 	 *
   4630 	 * XXX Values could probably stand some tuning.
   4631 	 */
   4632 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4633 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4634 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4635 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4636 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4637 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4638 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4639 	}
   4640 
   4641 	sc->sc_fcrtl = FCRTL_DFLT;
   4642 	if (sc->sc_type < WM_T_82543) {
   4643 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4644 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4645 	} else {
   4646 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4647 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4648 	}
   4649 
   4650 	if (sc->sc_type == WM_T_80003)
   4651 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4652 	else
   4653 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4654 
   4655 	/* Writes the control register. */
   4656 	wm_set_vlan(sc);
   4657 
   4658 	if (sc->sc_flags & WM_F_HAS_MII) {
   4659 		int val;
   4660 
   4661 		switch (sc->sc_type) {
   4662 		case WM_T_80003:
   4663 		case WM_T_ICH8:
   4664 		case WM_T_ICH9:
   4665 		case WM_T_ICH10:
   4666 		case WM_T_PCH:
   4667 		case WM_T_PCH2:
   4668 		case WM_T_PCH_LPT:
   4669 		case WM_T_PCH_SPT:
   4670 			/*
   4671 			 * Set the mac to wait the maximum time between each
   4672 			 * iteration and increase the max iterations when
   4673 			 * polling the phy; this fixes erroneous timeouts at
   4674 			 * 10Mbps.
   4675 			 */
   4676 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4677 			    0xFFFF);
   4678 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4679 			val |= 0x3F;
   4680 			wm_kmrn_writereg(sc,
   4681 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4682 			break;
   4683 		default:
   4684 			break;
   4685 		}
   4686 
   4687 		if (sc->sc_type == WM_T_80003) {
   4688 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4689 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4690 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4691 
    4692 			/* Bypass RX and TX FIFOs */
   4693 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4694 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4695 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4696 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4697 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4698 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4699 		}
   4700 	}
   4701 #if 0
   4702 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4703 #endif
   4704 
   4705 	/* Set up checksum offload parameters. */
   4706 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4707 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4708 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4709 		reg |= RXCSUM_IPOFL;
   4710 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4711 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4712 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4713 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4714 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4715 
   4716 	/* Set up MSI-X */
   4717 	if (sc->sc_nintrs > 1) {
   4718 		uint32_t ivar;
   4719 		struct wm_txqueue *txq;
   4720 		struct wm_rxqueue *rxq;
   4721 		int qid;
   4722 
   4723 		if (sc->sc_type == WM_T_82575) {
   4724 			/* Interrupt control */
   4725 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4726 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4727 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4728 
   4729 			/* TX */
   4730 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4731 				txq = &sc->sc_txq[i];
   4732 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4733 				    EITR_TX_QUEUE(txq->txq_id));
   4734 			}
   4735 			/* RX */
   4736 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4737 				rxq = &sc->sc_rxq[i];
   4738 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4739 				    EITR_RX_QUEUE(rxq->rxq_id));
   4740 			}
   4741 			/* Link status */
   4742 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4743 			    EITR_OTHER);
   4744 		} else if (sc->sc_type == WM_T_82574) {
   4745 			/* Interrupt control */
   4746 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4747 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4748 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4749 
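         			/*
         			 * The 82574 maps all causes (TX queues, RX
         			 * queues, link) through the single IVAR
         			 * register; IVAR_VALID_82574 marks each entry
         			 * as active.
         			 */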
   4750 			ivar = 0;
   4751 			/* TX */
   4752 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4753 				txq = &sc->sc_txq[i];
   4754 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4755 					| txq->txq_intr_idx),
   4756 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4757 			}
   4758 			/* RX */
   4759 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4760 				rxq = &sc->sc_rxq[i];
   4761 				ivar |= __SHIFTIN((IVAR_VALID_82574
   4762 					| rxq->rxq_intr_idx),
   4763 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4764 			}
   4765 			/* Link status */
   4766 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4767 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4768 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4769 		} else {
   4770 			/* Interrupt control */
   4771 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4772 			    | GPIE_EIAME | GPIE_PBA);
   4773 
   4774 			switch (sc->sc_type) {
   4775 			case WM_T_82580:
   4776 			case WM_T_I350:
   4777 			case WM_T_I354:
   4778 			case WM_T_I210:
   4779 			case WM_T_I211:
   4780 				/* TX */
   4781 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4782 					txq = &sc->sc_txq[i];
   4783 					qid = txq->txq_id;
   4784 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4785 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4786 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4787 						| IVAR_VALID),
   4788 					    IVAR_TX_MASK_Q(qid));
   4789 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4790 				}
   4791 
   4792 				/* RX */
   4793 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4794 					rxq = &sc->sc_rxq[i];
   4795 					qid = rxq->rxq_id;
   4796 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4797 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4798 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4799 						| IVAR_VALID),
   4800 					    IVAR_RX_MASK_Q(qid));
   4801 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4802 				}
   4803 				break;
   4804 			case WM_T_82576:
   4805 				/* TX */
   4806 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4807 					txq = &sc->sc_txq[i];
   4808 					qid = txq->txq_id;
   4809 					ivar = CSR_READ(sc,
   4810 					    WMREG_IVAR_Q_82576(qid));
   4811 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4812 					ivar |= __SHIFTIN((txq->txq_intr_idx
   4813 						| IVAR_VALID),
   4814 					    IVAR_TX_MASK_Q_82576(qid));
   4815 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4816 					    ivar);
   4817 				}
   4818 
   4819 				/* RX */
   4820 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4821 					rxq = &sc->sc_rxq[i];
   4822 					qid = rxq->rxq_id;
   4823 					ivar = CSR_READ(sc,
   4824 					    WMREG_IVAR_Q_82576(qid));
   4825 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4826 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
   4827 						| IVAR_VALID),
   4828 					    IVAR_RX_MASK_Q_82576(qid));
   4829 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4830 					    ivar);
   4831 				}
   4832 				break;
   4833 			default:
   4834 				break;
   4835 			}
   4836 
   4837 			/* Link status */
   4838 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4839 			    IVAR_MISC_OTHER);
   4840 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4841 		}
   4842 
   4843 		if (sc->sc_nrxqueues > 1) {
   4844 			wm_init_rss(sc);
   4845 
    4846 			/*
    4847 			 * NOTE: Receive Full-Packet Checksum Offload is
    4848 			 * mutually exclusive with Multiqueue. However, this
    4849 			 * is not the same as the TCP/IP checksums, which
    4850 			 * still work.
    4851 			 */
   4852 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4853 			reg |= RXCSUM_PCSD;
   4854 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4855 		}
   4856 	}
   4857 
   4858 	/* Set up the interrupt registers. */
   4859 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4860 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4861 	    ICR_RXO | ICR_RXT0;
   4862 	if (sc->sc_nintrs > 1) {
   4863 		uint32_t mask;
   4864 		struct wm_txqueue *txq;
   4865 		struct wm_rxqueue *rxq;
   4866 
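         		/*
         		 * Below, EIMS enables the per-vector causes while
         		 * EIAC/EIAM make them auto-clear and auto-mask when a
         		 * vector fires; the 82574 instead uses its own
         		 * auto-clear register, EIAC_82574.
         		 */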
   4867 		switch (sc->sc_type) {
   4868 		case WM_T_82574:
   4869 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4870 			    WMREG_EIAC_82574_MSIX_MASK);
   4871 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4872 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4873 			break;
   4874 		default:
   4875 			if (sc->sc_type == WM_T_82575) {
   4876 				mask = 0;
   4877 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4878 					txq = &sc->sc_txq[i];
   4879 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4880 				}
   4881 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4882 					rxq = &sc->sc_rxq[i];
   4883 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4884 				}
   4885 				mask |= EITR_OTHER;
   4886 			} else {
   4887 				mask = 0;
   4888 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4889 					txq = &sc->sc_txq[i];
   4890 					mask |= 1 << txq->txq_intr_idx;
   4891 				}
   4892 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4893 					rxq = &sc->sc_rxq[i];
   4894 					mask |= 1 << rxq->rxq_intr_idx;
   4895 				}
   4896 				mask |= 1 << sc->sc_link_intr_idx;
   4897 			}
   4898 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4899 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4900 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4901 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4902 			break;
   4903 		}
   4904 	} else
   4905 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4906 
   4907 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4908 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4909 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4910 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4911 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4912 		reg |= KABGTXD_BGSQLBIAS;
   4913 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4914 	}
   4915 
   4916 	/* Set up the inter-packet gap. */
   4917 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4918 
   4919 	if (sc->sc_type >= WM_T_82543) {
   4920 		/*
    4921 		 * XXX The 82574 has both ITR and EITR. Set EITR when we
    4922 		 * use the multiqueue function with MSI-X.
   4923 		 */
   4924 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4925 			int qidx;
   4926 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4927 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4928 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4929 				    sc->sc_itr);
   4930 			}
   4931 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4932 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4933 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4934 				    sc->sc_itr);
   4935 			}
   4936 			/*
    4937 			 * Link interrupts occur much less frequently than
    4938 			 * TX and RX interrupts, so we don't tune the
    4939 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4940 			 * FreeBSD's if_igb does.
   4941 			 */
   4942 		} else
   4943 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4944 	}
   4945 
   4946 	/* Set the VLAN ethernetype. */
   4947 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4948 
   4949 	/*
   4950 	 * Set up the transmit control register; we start out with
    4951 	 * a collision distance suitable for FDX, but update it when
   4952 	 * we resolve the media type.
   4953 	 */
   4954 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4955 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4956 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4957 	if (sc->sc_type >= WM_T_82571)
   4958 		sc->sc_tctl |= TCTL_MULR;
   4959 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4960 
   4961 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4962 		/* Write TDT after TCTL.EN is set. See the documentation. */
   4963 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4964 	}
   4965 
   4966 	if (sc->sc_type == WM_T_80003) {
   4967 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4968 		reg &= ~TCTL_EXT_GCEX_MASK;
   4969 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4970 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4971 	}
   4972 
   4973 	/* Set the media. */
   4974 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4975 		goto out;
   4976 
   4977 	/* Configure for OS presence */
   4978 	wm_init_manageability(sc);
   4979 
   4980 	/*
   4981 	 * Set up the receive control register; we actually program
   4982 	 * the register when we set the receive filter.  Use multicast
   4983 	 * address offset type 0.
   4984 	 *
   4985 	 * Only the i82544 has the ability to strip the incoming
   4986 	 * CRC, so we don't enable that feature.
   4987 	 */
   4988 	sc->sc_mchash_type = 0;
   4989 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4990 	    | RCTL_MO(sc->sc_mchash_type);
   4991 
   4992 	/*
   4993 	 * The I350 has a bug where it always strips the CRC whether
   4994 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   4995 	 */
   4996 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4997 	    || (sc->sc_type == WM_T_I210))
   4998 		sc->sc_rctl |= RCTL_SECRC;
   4999 
   5000 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5001 	    && (ifp->if_mtu > ETHERMTU)) {
   5002 		sc->sc_rctl |= RCTL_LPE;
   5003 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5004 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5005 	}
   5006 
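         	/*
         	 * Select the RCTL receive buffer size that matches MCLBYTES;
         	 * RCTL_BSEX extends the encodings to the 4k/8k/16k sizes.
         	 */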
   5007 	if (MCLBYTES == 2048) {
   5008 		sc->sc_rctl |= RCTL_2k;
   5009 	} else {
   5010 		if (sc->sc_type >= WM_T_82543) {
   5011 			switch (MCLBYTES) {
   5012 			case 4096:
   5013 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5014 				break;
   5015 			case 8192:
   5016 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5017 				break;
   5018 			case 16384:
   5019 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5020 				break;
   5021 			default:
   5022 				panic("wm_init: MCLBYTES %d unsupported",
   5023 				    MCLBYTES);
   5024 				break;
   5025 			}
    5026 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5027 	}
   5028 
   5029 	/* Set the receive filter. */
   5030 	wm_set_filter(sc);
   5031 
   5032 	/* Enable ECC */
   5033 	switch (sc->sc_type) {
   5034 	case WM_T_82571:
   5035 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5036 		reg |= PBA_ECC_CORR_EN;
   5037 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5038 		break;
   5039 	case WM_T_PCH_LPT:
   5040 	case WM_T_PCH_SPT:
   5041 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5042 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5043 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5044 
   5045 		reg = CSR_READ(sc, WMREG_CTRL);
   5046 		reg |= CTRL_MEHE;
   5047 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5048 		break;
   5049 	default:
   5050 		break;
   5051 	}
   5052 
    5053 	/* On 82575 and later, set RDT only if RX is enabled */
   5054 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5055 		int qidx;
   5056 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   5057 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   5058 			for (i = 0; i < WM_NRXDESC; i++) {
   5059 				WM_RX_LOCK(rxq);
   5060 				wm_init_rxdesc(rxq, i);
   5061 				WM_RX_UNLOCK(rxq);
   5063 			}
   5064 		}
   5065 	}
   5066 
   5067 	sc->sc_stopping = false;
   5068 
   5069 	/* Start the one second link check clock. */
   5070 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5071 
   5072 	/* ...all done! */
   5073 	ifp->if_flags |= IFF_RUNNING;
   5074 	ifp->if_flags &= ~IFF_OACTIVE;
   5075 
   5076  out:
   5077 	sc->sc_if_flags = ifp->if_flags;
   5078 	if (error)
   5079 		log(LOG_ERR, "%s: interface not running\n",
   5080 		    device_xname(sc->sc_dev));
   5081 	return error;
   5082 }
   5083 
   5084 /*
   5085  * wm_stop:		[ifnet interface function]
   5086  *
   5087  *	Stop transmission on the interface.
   5088  */
   5089 static void
   5090 wm_stop(struct ifnet *ifp, int disable)
   5091 {
   5092 	struct wm_softc *sc = ifp->if_softc;
   5093 
   5094 	WM_CORE_LOCK(sc);
   5095 	wm_stop_locked(ifp, disable);
   5096 	WM_CORE_UNLOCK(sc);
   5097 }
   5098 
   5099 static void
   5100 wm_stop_locked(struct ifnet *ifp, int disable)
   5101 {
   5102 	struct wm_softc *sc = ifp->if_softc;
   5103 	struct wm_txsoft *txs;
   5104 	int i, qidx;
   5105 
   5106 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5107 		device_xname(sc->sc_dev), __func__));
   5108 	KASSERT(WM_CORE_LOCKED(sc));
   5109 
   5110 	sc->sc_stopping = true;
   5111 
   5112 	/* Stop the one second clock. */
   5113 	callout_stop(&sc->sc_tick_ch);
   5114 
   5115 	/* Stop the 82547 Tx FIFO stall check timer. */
   5116 	if (sc->sc_type == WM_T_82547)
   5117 		callout_stop(&sc->sc_txfifo_ch);
   5118 
   5119 	if (sc->sc_flags & WM_F_HAS_MII) {
   5120 		/* Down the MII. */
   5121 		mii_down(&sc->sc_mii);
   5122 	} else {
   5123 #if 0
   5124 		/* Should we clear PHY's status properly? */
   5125 		wm_reset(sc);
   5126 #endif
   5127 	}
   5128 
   5129 	/* Stop the transmit and receive processes. */
   5130 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5131 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5132 	sc->sc_rctl &= ~RCTL_EN;
   5133 
   5134 	/*
   5135 	 * Clear the interrupt mask to ensure the device cannot assert its
   5136 	 * interrupt line.
   5137 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5138 	 * service any currently pending or shared interrupt.
   5139 	 */
   5140 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5141 	sc->sc_icr = 0;
   5142 	if (sc->sc_nintrs > 1) {
   5143 		if (sc->sc_type != WM_T_82574) {
   5144 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5145 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5146 		} else
   5147 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5148 	}
   5149 
   5150 	/* Release any queued transmit buffers. */
   5151 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5152 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5153 		WM_TX_LOCK(txq);
   5154 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5155 			txs = &txq->txq_soft[i];
   5156 			if (txs->txs_mbuf != NULL) {
   5157 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5158 				m_freem(txs->txs_mbuf);
   5159 				txs->txs_mbuf = NULL;
   5160 			}
   5161 		}
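         		/*
         		 * PCH_SPT errata workaround: if the chip still reports
         		 * a pending descriptor ring flush, push one dummy
         		 * 512-byte TX descriptor through to flush the TX ring
         		 * before the interface is left stopped.
         		 */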
   5162 		if (sc->sc_type == WM_T_PCH_SPT) {
   5163 			pcireg_t preg;
   5164 			uint32_t reg;
   5165 			int nexttx;
   5166 
   5167 			/* First, disable MULR fix in FEXTNVM11 */
   5168 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5169 			reg |= FEXTNVM11_DIS_MULRFIX;
   5170 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5171 
   5172 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5173 			    WM_PCI_DESCRING_STATUS);
   5174 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5175 				/* TX */
   5176 				printf("XXX need TX flush (reg = %08x)\n",
   5177 				    preg);
   5178 				wm_init_tx_descs(sc, txq);
   5179 				wm_init_tx_regs(sc, txq);
   5180 				nexttx = txq->txq_next;
   5181 				wm_set_dma_addr(
   5182 					&txq->txq_descs[nexttx].wtx_addr,
   5183 					WM_CDTXADDR(txq, nexttx));
   5184 				txq->txq_descs[nexttx].wtx_cmdlen
   5185 				    = htole32(WTX_CMD_IFCS | 512);
   5186 				wm_cdtxsync(txq, nexttx, 1,
   5187 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5188 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5189 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5190 				CSR_WRITE_FLUSH(sc);
   5191 				delay(250);
   5192 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5193 			}
   5194 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5195 			    WM_PCI_DESCRING_STATUS);
   5196 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5197 				/* RX */
   5198 				printf("XXX need RX flush\n");
   5199 			}
   5200 		}
   5201 		WM_TX_UNLOCK(txq);
   5202 	}
   5203 
   5204 	/* Mark the interface as down and cancel the watchdog timer. */
   5205 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5206 	ifp->if_timer = 0;
   5207 
   5208 	if (disable) {
   5209 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5210 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5211 			WM_RX_LOCK(rxq);
   5212 			wm_rxdrain(rxq);
   5213 			WM_RX_UNLOCK(rxq);
   5214 		}
   5215 	}
   5216 
   5217 #if 0 /* notyet */
   5218 	if (sc->sc_type >= WM_T_82544)
   5219 		CSR_WRITE(sc, WMREG_WUC, 0);
   5220 #endif
   5221 }
   5222 
   5223 static void
   5224 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5225 {
   5226 	struct mbuf *m;
   5227 	int i;
   5228 
   5229 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5230 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5231 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5232 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5233 		    m->m_data, m->m_len, m->m_flags);
   5234 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5235 	    i, i == 1 ? "" : "s");
   5236 }
   5237 
   5238 /*
   5239  * wm_82547_txfifo_stall:
   5240  *
   5241  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5242  *	reset the FIFO pointers, and restart packet transmission.
   5243  */
   5244 static void
   5245 wm_82547_txfifo_stall(void *arg)
   5246 {
   5247 	struct wm_softc *sc = arg;
   5248 	struct wm_txqueue *txq = sc->sc_txq;
   5249 #ifndef WM_MPSAFE
   5250 	int s;
   5251 
   5252 	s = splnet();
   5253 #endif
   5254 	WM_TX_LOCK(txq);
   5255 
   5256 	if (sc->sc_stopping)
   5257 		goto out;
   5258 
   5259 	if (txq->txq_fifo_stall) {
   5260 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5261 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5262 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5263 			/*
   5264 			 * Packets have drained.  Stop transmitter, reset
   5265 			 * FIFO pointers, restart transmitter, and kick
   5266 			 * the packet queue.
   5267 			 */
   5268 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5269 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5270 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5271 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5272 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5273 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5274 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5275 			CSR_WRITE_FLUSH(sc);
   5276 
   5277 			txq->txq_fifo_head = 0;
   5278 			txq->txq_fifo_stall = 0;
   5279 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5280 		} else {
   5281 			/*
   5282 			 * Still waiting for packets to drain; try again in
   5283 			 * another tick.
   5284 			 */
   5285 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5286 		}
   5287 	}
   5288 
   5289 out:
   5290 	WM_TX_UNLOCK(txq);
   5291 #ifndef WM_MPSAFE
   5292 	splx(s);
   5293 #endif
   5294 }
   5295 
   5296 /*
   5297  * wm_82547_txfifo_bugchk:
   5298  *
   5299  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5300  *	prevent enqueueing a packet that would wrap around the end
    5301 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5302  *
   5303  *	We do this by checking the amount of space before the end
   5304  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5305  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5306  *	the internal FIFO pointers to the beginning, and restart
   5307  *	transmission on the interface.
   5308  */
   5309 #define	WM_FIFO_HDR		0x10
   5310 #define	WM_82547_PAD_LEN	0x3e0
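         /*
          * Example: a 1514-byte frame consumes
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space.
          */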
   5311 static int
   5312 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5313 {
   5314 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5315 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5316 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5317 
   5318 	/* Just return if already stalled. */
   5319 	if (txq->txq_fifo_stall)
   5320 		return 1;
   5321 
   5322 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5323 		/* Stall only occurs in half-duplex mode. */
   5324 		goto send_packet;
   5325 	}
   5326 
   5327 	if (len >= WM_82547_PAD_LEN + space) {
   5328 		txq->txq_fifo_stall = 1;
   5329 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5330 		return 1;
   5331 	}
   5332 
   5333  send_packet:
   5334 	txq->txq_fifo_head += len;
   5335 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5336 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5337 
   5338 	return 0;
   5339 }
   5340 
   5341 static int
   5342 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5343 {
   5344 	int error;
   5345 
   5346 	/*
   5347 	 * Allocate the control data structures, and create and load the
   5348 	 * DMA map for it.
   5349 	 *
   5350 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5351 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5352 	 * both sets within the same 4G segment.
   5353 	 */
   5354 	if (sc->sc_type < WM_T_82544) {
   5355 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5356 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) *WM_NTXDESC(txq);
   5357 	} else {
   5358 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5359 		txq->txq_desc_size = sizeof(txdescs_t);
   5360 	}
   5361 
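         	/*
         	 * The 4GB boundary argument to bus_dmamem_alloc() below keeps
         	 * the allocation from crossing a 4GB line, which is what
         	 * enforces the single-4G-segment requirement noted above.
         	 */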
   5362 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size,
   5363 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5364 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5365 		aprint_error_dev(sc->sc_dev,
   5366 		    "unable to allocate TX control data, error = %d\n",
   5367 		    error);
   5368 		goto fail_0;
   5369 	}
   5370 
   5371 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5372 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5373 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5374 		aprint_error_dev(sc->sc_dev,
   5375 		    "unable to map TX control data, error = %d\n", error);
   5376 		goto fail_1;
   5377 	}
   5378 
   5379 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5380 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5381 		aprint_error_dev(sc->sc_dev,
   5382 		    "unable to create TX control data DMA map, error = %d\n",
   5383 		    error);
   5384 		goto fail_2;
   5385 	}
   5386 
   5387 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5388 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5389 		aprint_error_dev(sc->sc_dev,
   5390 		    "unable to load TX control data DMA map, error = %d\n",
   5391 		    error);
   5392 		goto fail_3;
   5393 	}
   5394 
   5395 	return 0;
   5396 
   5397  fail_3:
   5398 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5399  fail_2:
   5400 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5401 	    txq->txq_desc_size);
   5402  fail_1:
   5403 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5404  fail_0:
   5405 	return error;
   5406 }
   5407 
   5408 static void
   5409 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5410 {
   5411 
   5412 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5413 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5414 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5415 	    txq->txq_desc_size);
   5416 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5417 }
   5418 
   5419 static int
   5420 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5421 {
   5422 	int error;
   5423 
   5424 	/*
   5425 	 * Allocate the control data structures, and create and load the
   5426 	 * DMA map for it.
   5427 	 *
   5428 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5429 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5430 	 * both sets within the same 4G segment.
   5431 	 */
   5432 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5433 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5434 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5435 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5436 		aprint_error_dev(sc->sc_dev,
   5437 		    "unable to allocate RX control data, error = %d\n",
   5438 		    error);
   5439 		goto fail_0;
   5440 	}
   5441 
   5442 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5443 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5444 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5445 		aprint_error_dev(sc->sc_dev,
   5446 		    "unable to map RX control data, error = %d\n", error);
   5447 		goto fail_1;
   5448 	}
   5449 
   5450 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5451 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5452 		aprint_error_dev(sc->sc_dev,
   5453 		    "unable to create RX control data DMA map, error = %d\n",
   5454 		    error);
   5455 		goto fail_2;
   5456 	}
   5457 
   5458 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5459 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5460 		aprint_error_dev(sc->sc_dev,
   5461 		    "unable to load RX control data DMA map, error = %d\n",
   5462 		    error);
   5463 		goto fail_3;
   5464 	}
   5465 
   5466 	return 0;
   5467 
   5468  fail_3:
   5469 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5470  fail_2:
   5471 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5472 	    rxq->rxq_desc_size);
   5473  fail_1:
   5474 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5475  fail_0:
   5476 	return error;
   5477 }
   5478 
   5479 static void
   5480 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5481 {
   5482 
   5483 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5484 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5485 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5486 	    rxq->rxq_desc_size);
   5487 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5488 }
   5489 
   5490 
   5491 static int
   5492 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5493 {
   5494 	int i, error;
   5495 
   5496 	/* Create the transmit buffer DMA maps. */
   5497 	WM_TXQUEUELEN(txq) =
   5498 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5499 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5500 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5501 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5502 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5503 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5504 			aprint_error_dev(sc->sc_dev,
   5505 			    "unable to create Tx DMA map %d, error = %d\n",
   5506 			    i, error);
   5507 			goto fail;
   5508 		}
   5509 	}
   5510 
   5511 	return 0;
   5512 
   5513  fail:
   5514 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5515 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5516 			bus_dmamap_destroy(sc->sc_dmat,
   5517 			    txq->txq_soft[i].txs_dmamap);
   5518 	}
   5519 	return error;
   5520 }
   5521 
   5522 static void
   5523 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5524 {
   5525 	int i;
   5526 
   5527 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5528 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5529 			bus_dmamap_destroy(sc->sc_dmat,
   5530 			    txq->txq_soft[i].txs_dmamap);
   5531 	}
   5532 }
   5533 
   5534 static int
   5535 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5536 {
   5537 	int i, error;
   5538 
   5539 	/* Create the receive buffer DMA maps. */
   5540 	for (i = 0; i < WM_NRXDESC; i++) {
   5541 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5542 			    MCLBYTES, 0, 0,
   5543 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5544 			aprint_error_dev(sc->sc_dev,
   5545 			    "unable to create Rx DMA map %d error = %d\n",
   5546 			    i, error);
   5547 			goto fail;
   5548 		}
   5549 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5550 	}
   5551 
   5552 	return 0;
   5553 
   5554  fail:
   5555 	for (i = 0; i < WM_NRXDESC; i++) {
   5556 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5557 			bus_dmamap_destroy(sc->sc_dmat,
   5558 			    rxq->rxq_soft[i].rxs_dmamap);
   5559 	}
   5560 	return error;
   5561 }
   5562 
   5563 static void
   5564 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5565 {
   5566 	int i;
   5567 
   5568 	for (i = 0; i < WM_NRXDESC; i++) {
   5569 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5570 			bus_dmamap_destroy(sc->sc_dmat,
   5571 			    rxq->rxq_soft[i].rxs_dmamap);
   5572 	}
   5573 }
   5574 
   5575 /*
    5576  * wm_alloc_txrx_queues:
   5577  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5578  */
   5579 static int
   5580 wm_alloc_txrx_queues(struct wm_softc *sc)
   5581 {
   5582 	int i, error, tx_done, rx_done;
   5583 
   5584 	/*
   5585 	 * For transmission
   5586 	 */
   5587 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5588 	    KM_SLEEP);
   5589 	if (sc->sc_txq == NULL) {
   5590 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_txqueue\n");
   5591 		error = ENOMEM;
   5592 		goto fail_0;
   5593 	}
   5594 
   5595 	error = 0;
   5596 	tx_done = 0;
   5597 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5598 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5599 		txq->txq_sc = sc;
   5600 #ifdef WM_MPSAFE
   5601 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5602 #else
   5603 		txq->txq_lock = NULL;
   5604 #endif
   5605 		error = wm_alloc_tx_descs(sc, txq);
   5606 		if (error)
   5607 			break;
   5608 		error = wm_alloc_tx_buffer(sc, txq);
   5609 		if (error) {
   5610 			wm_free_tx_descs(sc, txq);
   5611 			break;
   5612 		}
   5613 		tx_done++;
   5614 	}
   5615 	if (error)
   5616 		goto fail_1;
   5617 
   5618 	/*
    5619 	 * For receive
   5620 	 */
   5621 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5622 	    KM_SLEEP);
   5623 	if (sc->sc_rxq == NULL) {
   5624 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_rxqueue\n");
   5625 		error = ENOMEM;
   5626 		goto fail_1;
   5627 	}
   5628 
   5629 	error = 0;
   5630 	rx_done = 0;
   5631 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5632 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5633 		rxq->rxq_sc = sc;
   5634 #ifdef WM_MPSAFE
   5635 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5636 #else
   5637 		rxq->rxq_lock = NULL;
   5638 #endif
   5639 		error = wm_alloc_rx_descs(sc, rxq);
   5640 		if (error)
   5641 			break;
   5642 
   5643 		error = wm_alloc_rx_buffer(sc, rxq);
   5644 		if (error) {
   5645 			wm_free_rx_descs(sc, rxq);
   5646 			break;
   5647 		}
   5648 
   5649 		rx_done++;
   5650 	}
   5651 	if (error)
   5652 		goto fail_2;
   5653 
   5654 	return 0;
   5655 
   5656  fail_2:
   5657 	for (i = 0; i < rx_done; i++) {
   5658 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5659 		wm_free_rx_buffer(sc, rxq);
   5660 		wm_free_rx_descs(sc, rxq);
   5661 		if (rxq->rxq_lock)
   5662 			mutex_obj_free(rxq->rxq_lock);
   5663 	}
   5664 	kmem_free(sc->sc_rxq,
   5665 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5666  fail_1:
   5667 	for (i = 0; i < tx_done; i++) {
   5668 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5669 		wm_free_tx_buffer(sc, txq);
   5670 		wm_free_tx_descs(sc, txq);
   5671 		if (txq->txq_lock)
   5672 			mutex_obj_free(txq->txq_lock);
   5673 	}
   5674 	kmem_free(sc->sc_txq,
   5675 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5676  fail_0:
   5677 	return error;
   5678 }
   5679 
   5680 /*
    5681  * wm_free_txrx_queues:
   5682  *	Free {tx,rx}descs and {tx,rx} buffers
   5683  */
   5684 static void
   5685 wm_free_txrx_queues(struct wm_softc *sc)
   5686 {
   5687 	int i;
   5688 
   5689 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5690 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5691 		wm_free_rx_buffer(sc, rxq);
   5692 		wm_free_rx_descs(sc, rxq);
   5693 		if (rxq->rxq_lock)
   5694 			mutex_obj_free(rxq->rxq_lock);
   5695 	}
   5696 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5697 
   5698 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5699 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5700 		wm_free_tx_buffer(sc, txq);
   5701 		wm_free_tx_descs(sc, txq);
   5702 		if (txq->txq_lock)
   5703 			mutex_obj_free(txq->txq_lock);
   5704 	}
   5705 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5706 }
   5707 
   5708 static void
   5709 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5710 {
   5711 
   5712 	KASSERT(WM_TX_LOCKED(txq));
   5713 
   5714 	/* Initialize the transmit descriptor ring. */
   5715 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5716 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5717 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5718 	txq->txq_free = WM_NTXDESC(txq);
   5719 	txq->txq_next = 0;
   5720 }
   5721 
   5722 static void
   5723 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5724 {
   5725 
   5726 	KASSERT(WM_TX_LOCKED(txq));
   5727 
   5728 	if (sc->sc_type < WM_T_82543) {
   5729 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5730 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5731 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5732 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5733 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5734 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5735 	} else {
   5736 		int qid = txq->txq_id;
   5737 
   5738 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5739 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5740 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5741 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5742 
   5743 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5744 			/*
   5745 			 * Don't write TDT before TCTL.EN is set.
    5746 			 * See the documentation.
   5747 			 */
   5748 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5749 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5750 			    | TXDCTL_WTHRESH(0));
   5751 		else {
   5752 			/* ITR / 4 */
   5753 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5754 			if (sc->sc_type >= WM_T_82540) {
    5755 				/* Should be the same value as TIDV */
   5756 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5757 			}
   5758 
   5759 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5760 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5761 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5762 		}
   5763 	}
   5764 }
   5765 
   5766 static void
   5767 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5768 {
   5769 	int i;
   5770 
   5771 	KASSERT(WM_TX_LOCKED(txq));
   5772 
   5773 	/* Initialize the transmit job descriptors. */
   5774 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5775 		txq->txq_soft[i].txs_mbuf = NULL;
   5776 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5777 	txq->txq_snext = 0;
   5778 	txq->txq_sdirty = 0;
   5779 }
   5780 
   5781 static void
   5782 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5783 {
   5784 
   5785 	KASSERT(WM_TX_LOCKED(txq));
   5786 
   5787 	/*
   5788 	 * Set up some register offsets that are different between
   5789 	 * the i82542 and the i82543 and later chips.
   5790 	 */
   5791 	if (sc->sc_type < WM_T_82543)
   5792 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5793 	else
   5794 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
   5795 
   5796 	wm_init_tx_descs(sc, txq);
   5797 	wm_init_tx_regs(sc, txq);
   5798 	wm_init_tx_buffer(sc, txq);
   5799 }
   5800 
   5801 static void
   5802 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5803 {
   5804 
   5805 	KASSERT(WM_RX_LOCKED(rxq));
   5806 
   5807 	/*
   5808 	 * Initialize the receive descriptor and receive job
   5809 	 * descriptor rings.
   5810 	 */
   5811 	if (sc->sc_type < WM_T_82543) {
   5812 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5813 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5814 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5815 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5816 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5817 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5818 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5819 
   5820 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5821 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5822 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5823 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5824 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5825 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5826 	} else {
   5827 		int qid = rxq->rxq_id;
   5828 
   5829 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5830 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5831 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5832 
   5833 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5834 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5835 				panic("%s: MCLBYTES %d unsupported for "
         				    "i82575 or higher\n", __func__, MCLBYTES);
   5836 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5837 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5838 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5839 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5840 			    | RXDCTL_WTHRESH(1));
   5841 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5842 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5843 		} else {
   5844 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5845 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5846 			/* ITR / 4 */
   5847 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5848 			/* MUST be the same value as RDTR */
   5849 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5850 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5851 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5852 		}
   5853 	}
   5854 }
   5855 
   5856 static int
   5857 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5858 {
   5859 	struct wm_rxsoft *rxs;
   5860 	int error, i;
   5861 
   5862 	KASSERT(WM_RX_LOCKED(rxq));
   5863 
   5864 	for (i = 0; i < WM_NRXDESC; i++) {
   5865 		rxs = &rxq->rxq_soft[i];
   5866 		if (rxs->rxs_mbuf == NULL) {
   5867 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5868 				log(LOG_ERR, "%s: unable to allocate or map "
   5869 				    "rx buffer %d, error = %d\n",
   5870 				    device_xname(sc->sc_dev), i, error);
   5871 				/*
   5872 				 * XXX Should attempt to run with fewer receive
   5873 				 * XXX buffers instead of just failing.
   5874 				 */
   5875 				wm_rxdrain(rxq);
   5876 				return ENOMEM;
   5877 			}
   5878 		} else {
   5879 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5880 				wm_init_rxdesc(rxq, i);
   5881 			/*
    5882 			 * For 82575 and newer devices, the RX descriptors
    5883 			 * must be initialized after RCTL.EN is set in
    5884 			 * wm_set_filter().
   5885 			 */
   5886 		}
   5887 	}
   5888 	rxq->rxq_ptr = 0;
   5889 	rxq->rxq_discard = 0;
   5890 	WM_RXCHAIN_RESET(rxq);
   5891 
   5892 	return 0;
   5893 }
   5894 
   5895 static int
   5896 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5897 {
   5898 
   5899 	KASSERT(WM_RX_LOCKED(rxq));
   5900 
   5901 	/*
   5902 	 * Set up some register offsets that are different between
   5903 	 * the i82542 and the i82543 and later chips.
   5904 	 */
   5905 	if (sc->sc_type < WM_T_82543)
   5906 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5907 	else
   5908 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5909 
   5910 	wm_init_rx_regs(sc, rxq);
   5911 	return wm_init_rx_buffer(sc, rxq);
   5912 }
   5913 
   5914 /*
    5915  * wm_init_txrx_queues:
   5916  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5917  */
   5918 static int
   5919 wm_init_txrx_queues(struct wm_softc *sc)
   5920 {
   5921 	int i, error;
   5922 
   5923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5924 		device_xname(sc->sc_dev), __func__));
   5925 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5926 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5927 		WM_TX_LOCK(txq);
   5928 		wm_init_tx_queue(sc, txq);
   5929 		WM_TX_UNLOCK(txq);
   5930 	}
   5931 
   5932 	error = 0;
   5933 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5934 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5935 		WM_RX_LOCK(rxq);
   5936 		error = wm_init_rx_queue(sc, rxq);
   5937 		WM_RX_UNLOCK(rxq);
   5938 		if (error)
   5939 			break;
   5940 	}
   5941 
   5942 	return error;
   5943 }
   5944 
   5945 /*
   5946  * wm_tx_offload:
   5947  *
   5948  *	Set up TCP/IP checksumming parameters for the
   5949  *	specified packet.
   5950  */
   5951 static int
   5952 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5953     uint8_t *fieldsp)
   5954 {
   5955 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5956 	struct mbuf *m0 = txs->txs_mbuf;
   5957 	struct livengood_tcpip_ctxdesc *t;
   5958 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5959 	uint32_t ipcse;
   5960 	struct ether_header *eh;
   5961 	int offset, iphl;
   5962 	uint8_t fields;
   5963 
   5964 	/*
   5965 	 * XXX It would be nice if the mbuf pkthdr had offset
   5966 	 * fields for the protocol headers.
   5967 	 */
   5968 
   5969 	eh = mtod(m0, struct ether_header *);
   5970 	switch (htons(eh->ether_type)) {
   5971 	case ETHERTYPE_IP:
   5972 	case ETHERTYPE_IPV6:
   5973 		offset = ETHER_HDR_LEN;
   5974 		break;
   5975 
   5976 	case ETHERTYPE_VLAN:
   5977 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5978 		break;
   5979 
   5980 	default:
   5981 		/*
   5982 		 * Don't support this protocol or encapsulation.
   5983 		 */
   5984 		*fieldsp = 0;
   5985 		*cmdp = 0;
   5986 		return 0;
   5987 	}
   5988 
   5989 	if ((m0->m_pkthdr.csum_flags &
   5990 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   5991 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5992 	} else {
   5993 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5994 	}
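         	/* IPCSE: offset of the last byte covered by the IP checksum */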
   5995 	ipcse = offset + iphl - 1;
   5996 
   5997 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5998 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5999 	seg = 0;
   6000 	fields = 0;
   6001 
   6002 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6003 		int hlen = offset + iphl;
   6004 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6005 
   6006 		if (__predict_false(m0->m_len <
   6007 				    (hlen + sizeof(struct tcphdr)))) {
   6008 			/*
   6009 			 * TCP/IP headers are not in the first mbuf; we need
   6010 			 * to do this the slow and painful way.  Let's just
   6011 			 * hope this doesn't happen very often.
   6012 			 */
   6013 			struct tcphdr th;
   6014 
   6015 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6016 
   6017 			m_copydata(m0, hlen, sizeof(th), &th);
   6018 			if (v4) {
   6019 				struct ip ip;
   6020 
   6021 				m_copydata(m0, offset, sizeof(ip), &ip);
   6022 				ip.ip_len = 0;
   6023 				m_copyback(m0,
   6024 				    offset + offsetof(struct ip, ip_len),
   6025 				    sizeof(ip.ip_len), &ip.ip_len);
   6026 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6027 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6028 			} else {
   6029 				struct ip6_hdr ip6;
   6030 
   6031 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6032 				ip6.ip6_plen = 0;
   6033 				m_copyback(m0,
   6034 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6035 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6036 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6037 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6038 			}
   6039 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6040 			    sizeof(th.th_sum), &th.th_sum);
   6041 
   6042 			hlen += th.th_off << 2;
   6043 		} else {
   6044 			/*
   6045 			 * TCP/IP headers are in the first mbuf; we can do
   6046 			 * this the easy way.
   6047 			 */
   6048 			struct tcphdr *th;
   6049 
   6050 			if (v4) {
   6051 				struct ip *ip =
   6052 				    (void *)(mtod(m0, char *) + offset);
   6053 				th = (void *)(mtod(m0, char *) + hlen);
   6054 
   6055 				ip->ip_len = 0;
   6056 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6057 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6058 			} else {
   6059 				struct ip6_hdr *ip6 =
   6060 				    (void *)(mtod(m0, char *) + offset);
   6061 				th = (void *)(mtod(m0, char *) + hlen);
   6062 
   6063 				ip6->ip6_plen = 0;
   6064 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6065 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6066 			}
   6067 			hlen += th->th_off << 2;
   6068 		}
   6069 
   6070 		if (v4) {
   6071 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6072 			cmdlen |= WTX_TCPIP_CMD_IP;
   6073 		} else {
   6074 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6075 			ipcse = 0;
   6076 		}
   6077 		cmd |= WTX_TCPIP_CMD_TSE;
   6078 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6079 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6080 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6081 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6082 	}
   6083 
   6084 	/*
   6085 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6086 	 * offload feature, if we load the context descriptor, we
   6087 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6088 	 */
   6089 
   6090 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6091 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6092 	    WTX_TCPIP_IPCSE(ipcse);
   6093 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6094 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   6095 		fields |= WTX_IXSM;
   6096 	}
   6097 
   6098 	offset += iphl;
   6099 
   6100 	if (m0->m_pkthdr.csum_flags &
   6101 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6102 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6103 		fields |= WTX_TXSM;
   6104 		tucs = WTX_TCPIP_TUCSS(offset) |
   6105 		    WTX_TCPIP_TUCSO(offset +
   6106 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6107 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6108 	} else if ((m0->m_pkthdr.csum_flags &
   6109 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6110 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6111 		fields |= WTX_TXSM;
   6112 		tucs = WTX_TCPIP_TUCSS(offset) |
   6113 		    WTX_TCPIP_TUCSO(offset +
   6114 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6115 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6116 	} else {
   6117 		/* Just initialize it to a valid TCP context. */
   6118 		tucs = WTX_TCPIP_TUCSS(offset) |
   6119 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6120 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6121 	}
   6122 
   6123 	/* Fill in the context descriptor. */
   6124 	t = (struct livengood_tcpip_ctxdesc *)
   6125 	    &txq->txq_descs[txq->txq_next];
   6126 	t->tcpip_ipcs = htole32(ipcs);
   6127 	t->tcpip_tucs = htole32(tucs);
   6128 	t->tcpip_cmdlen = htole32(cmdlen);
   6129 	t->tcpip_seg = htole32(seg);
   6130 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6131 
   6132 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6133 	txs->txs_ndesc++;
   6134 
   6135 	*cmdp = cmd;
   6136 	*fieldsp = fields;
   6137 
   6138 	return 0;
   6139 }
   6140 
   6141 /*
   6142  * wm_start:		[ifnet interface function]
   6143  *
   6144  *	Start packet transmission on the interface.
   6145  */
   6146 static void
   6147 wm_start(struct ifnet *ifp)
   6148 {
   6149 	struct wm_softc *sc = ifp->if_softc;
   6150 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6151 
   6152 	WM_TX_LOCK(txq);
   6153 	if (!sc->sc_stopping)
   6154 		wm_start_locked(ifp);
   6155 	WM_TX_UNLOCK(txq);
   6156 }
   6157 
   6158 static void
   6159 wm_start_locked(struct ifnet *ifp)
   6160 {
   6161 	struct wm_softc *sc = ifp->if_softc;
   6162 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6163 	struct mbuf *m0;
   6164 	struct m_tag *mtag;
   6165 	struct wm_txsoft *txs;
   6166 	bus_dmamap_t dmamap;
   6167 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6168 	bus_addr_t curaddr;
   6169 	bus_size_t seglen, curlen;
   6170 	uint32_t cksumcmd;
   6171 	uint8_t cksumfields;
   6172 
   6173 	KASSERT(WM_TX_LOCKED(txq));
   6174 
   6175 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6176 		return;
   6177 
   6178 	/* Remember the previous number of free descriptors. */
   6179 	ofree = txq->txq_free;
   6180 
   6181 	/*
   6182 	 * Loop through the send queue, setting up transmit descriptors
   6183 	 * until we drain the queue, or use up all available transmit
   6184 	 * descriptors.
   6185 	 */
   6186 	for (;;) {
   6187 		m0 = NULL;
   6188 
   6189 		/* Get a work queue entry. */
   6190 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6191 			wm_txeof(sc);
   6192 			if (txq->txq_sfree == 0) {
   6193 				DPRINTF(WM_DEBUG_TX,
   6194 				    ("%s: TX: no free job descriptors\n",
   6195 					device_xname(sc->sc_dev)));
   6196 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6197 				break;
   6198 			}
   6199 		}
   6200 
   6201 		/* Grab a packet off the queue. */
   6202 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6203 		if (m0 == NULL)
   6204 			break;
   6205 
   6206 		DPRINTF(WM_DEBUG_TX,
   6207 		    ("%s: TX: have packet to transmit: %p\n",
   6208 		    device_xname(sc->sc_dev), m0));
   6209 
   6210 		txs = &txq->txq_soft[txq->txq_snext];
   6211 		dmamap = txs->txs_dmamap;
   6212 
   6213 		use_tso = (m0->m_pkthdr.csum_flags &
   6214 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6215 
   6216 		/*
   6217 		 * So says the Linux driver:
   6218 		 * The controller does a simple calculation to make sure
   6219 		 * there is enough room in the FIFO before initiating the
   6220 		 * DMA for each buffer.  The calc is:
   6221 		 *	4 = ceil(buffer len / MSS)
   6222 		 * To make sure we don't overrun the FIFO, adjust the max
   6223 		 * buffer len if the MSS drops.
   6224 		 */
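         		/*
         		 * Example: an MSS of 1448 clamps each DMA segment to
         		 * 1448 << 2 = 5792 bytes.
         		 */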
   6225 		dmamap->dm_maxsegsz =
   6226 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6227 		    ? m0->m_pkthdr.segsz << 2
   6228 		    : WTX_MAX_LEN;
   6229 
   6230 		/*
   6231 		 * Load the DMA map.  If this fails, the packet either
   6232 		 * didn't fit in the allotted number of segments, or we
   6233 		 * were short on resources.  For the too-many-segments
   6234 		 * case, we simply report an error and drop the packet,
   6235 		 * since we can't sanely copy a jumbo packet to a single
   6236 		 * buffer.
   6237 		 */
   6238 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6239 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6240 		if (error) {
   6241 			if (error == EFBIG) {
   6242 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6243 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6244 				    "DMA segments, dropping...\n",
   6245 				    device_xname(sc->sc_dev));
   6246 				wm_dump_mbuf_chain(sc, m0);
   6247 				m_freem(m0);
   6248 				continue;
   6249 			}
   6250 			/*  Short on resources, just stop for now. */
   6251 			DPRINTF(WM_DEBUG_TX,
   6252 			    ("%s: TX: dmamap load failed: %d\n",
   6253 			    device_xname(sc->sc_dev), error));
   6254 			break;
   6255 		}
   6256 
   6257 		segs_needed = dmamap->dm_nsegs;
   6258 		if (use_tso) {
   6259 			/* For sentinel descriptor; see below. */
   6260 			segs_needed++;
   6261 		}
   6262 
   6263 		/*
   6264 		 * Ensure we have enough descriptors free to describe
   6265 		 * the packet.  Note, we always reserve one descriptor
   6266 		 * at the end of the ring due to the semantics of the
   6267 		 * TDT register, plus one more in the event we need
   6268 		 * to load offload context.
   6269 		 */
   6270 		if (segs_needed > txq->txq_free - 2) {
   6271 			/*
   6272 			 * Not enough free descriptors to transmit this
   6273 			 * packet.  We haven't committed anything yet,
   6274 			 * so just unload the DMA map, put the packet
    6275 			 * back on the queue, and punt.  Notify the upper
   6276 			 * layer that there are no more slots left.
   6277 			 */
   6278 			DPRINTF(WM_DEBUG_TX,
   6279 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6280 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6281 			    segs_needed, txq->txq_free - 1));
   6282 			ifp->if_flags |= IFF_OACTIVE;
   6283 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6284 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6285 			break;
   6286 		}
   6287 
   6288 		/*
   6289 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6290 		 * once we know we can transmit the packet, since we
   6291 		 * do some internal FIFO space accounting here.
   6292 		 */
   6293 		if (sc->sc_type == WM_T_82547 &&
   6294 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6295 			DPRINTF(WM_DEBUG_TX,
   6296 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6297 			    device_xname(sc->sc_dev)));
   6298 			ifp->if_flags |= IFF_OACTIVE;
   6299 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6300 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6301 			break;
   6302 		}
   6303 
   6304 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6305 
   6306 		DPRINTF(WM_DEBUG_TX,
   6307 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6308 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6309 
   6310 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6311 
   6312 		/*
   6313 		 * Store a pointer to the packet so that we can free it
   6314 		 * later.
   6315 		 *
    6316 		 * Initially, we consider the number of descriptors the
    6317 		 * packet uses to be the number of DMA segments.  This may
    6318 		 * be incremented by 1 if we do checksum offload (a
    6319 		 * descriptor is used to set the checksum context).
   6320 		 */
   6321 		txs->txs_mbuf = m0;
   6322 		txs->txs_firstdesc = txq->txq_next;
   6323 		txs->txs_ndesc = segs_needed;
   6324 
   6325 		/* Set up offload parameters for this packet. */
   6326 		if (m0->m_pkthdr.csum_flags &
   6327 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6328 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6329 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6330 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6331 					  &cksumfields) != 0) {
   6332 				/* Error message already displayed. */
   6333 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6334 				continue;
   6335 			}
   6336 		} else {
   6337 			cksumcmd = 0;
   6338 			cksumfields = 0;
   6339 		}
   6340 
   6341 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6342 
   6343 		/* Sync the DMA map. */
   6344 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6345 		    BUS_DMASYNC_PREWRITE);
   6346 
   6347 		/* Initialize the transmit descriptor. */
   6348 		for (nexttx = txq->txq_next, seg = 0;
   6349 		     seg < dmamap->dm_nsegs; seg++) {
   6350 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6351 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6352 			     seglen != 0;
   6353 			     curaddr += curlen, seglen -= curlen,
   6354 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6355 				curlen = seglen;
   6356 
   6357 				/*
   6358 				 * So says the Linux driver:
   6359 				 * Work around for premature descriptor
   6360 				 * write-backs in TSO mode.  Append a
   6361 				 * 4-byte sentinel descriptor.
   6362 				 */
   6363 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6364 				    curlen > 8)
   6365 					curlen -= 4;
   6366 
   6367 				wm_set_dma_addr(
   6368 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6369 				txq->txq_descs[nexttx].wtx_cmdlen
   6370 				    = htole32(cksumcmd | curlen);
   6371 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6372 				    = 0;
   6373 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6374 				    = cksumfields;
   6375 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6376 				lasttx = nexttx;
   6377 
   6378 				DPRINTF(WM_DEBUG_TX,
   6379 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6380 				     "len %#04zx\n",
   6381 				    device_xname(sc->sc_dev), nexttx,
   6382 				    (uint64_t)curaddr, curlen));
   6383 			}
   6384 		}
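
		/*
		 * Sentinel example: with TSO and a final 1500-byte DMA
		 * segment, the loop above emits a 1496-byte descriptor
		 * followed by a 4-byte one (the "curlen -= 4" case);
		 * that extra descriptor is the one counted by the
		 * "segs_needed++" for TSO earlier.
		 */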
   6385 
   6386 		KASSERT(lasttx != -1);
   6387 
   6388 		/*
   6389 		 * Set up the command byte on the last descriptor of
   6390 		 * the packet.  If we're in the interrupt delay window,
   6391 		 * delay the interrupt.
   6392 		 */
   6393 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6394 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6395 
   6396 		/*
   6397 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6398 		 * up the descriptor to encapsulate the packet for us.
   6399 		 *
   6400 		 * This is only valid on the last descriptor of the packet.
   6401 		 */
   6402 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6403 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6404 			    htole32(WTX_CMD_VLE);
   6405 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6406 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6407 		}
   6408 
   6409 		txs->txs_lastdesc = lasttx;
   6410 
   6411 		DPRINTF(WM_DEBUG_TX,
   6412 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6413 		    device_xname(sc->sc_dev),
   6414 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6415 
   6416 		/* Sync the descriptors we're using. */
   6417 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6418 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6419 
   6420 		/* Give the packet to the chip. */
   6421 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6422 
   6423 		DPRINTF(WM_DEBUG_TX,
   6424 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6425 
   6426 		DPRINTF(WM_DEBUG_TX,
   6427 		    ("%s: TX: finished transmitting packet, job %d\n",
   6428 		    device_xname(sc->sc_dev), txq->txq_snext));
   6429 
   6430 		/* Advance the tx pointer. */
   6431 		txq->txq_free -= txs->txs_ndesc;
   6432 		txq->txq_next = nexttx;
   6433 
   6434 		txq->txq_sfree--;
   6435 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6436 
   6437 		/* Pass the packet to any BPF listeners. */
   6438 		bpf_mtap(ifp, m0);
   6439 	}
   6440 
   6441 	if (m0 != NULL) {
   6442 		ifp->if_flags |= IFF_OACTIVE;
   6443 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6444 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6445 			__func__));
   6446 		m_freem(m0);
   6447 	}
   6448 
   6449 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6450 		/* No more slots; notify upper layer. */
   6451 		ifp->if_flags |= IFF_OACTIVE;
   6452 	}
   6453 
   6454 	if (txq->txq_free != ofree) {
   6455 		/* Set a watchdog timer in case the chip flakes out. */
   6456 		ifp->if_timer = 5;
   6457 	}
   6458 }
   6459 
   6460 /*
   6461  * wm_nq_tx_offload:
   6462  *
   6463  *	Set up TCP/IP checksumming parameters for the
   6464  *	specified packet, for NEWQUEUE devices
   6465  */
   6466 static int
   6467 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6468     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6469 {
   6470 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6471 	struct mbuf *m0 = txs->txs_mbuf;
   6472 	struct m_tag *mtag;
   6473 	uint32_t vl_len, mssidx, cmdc;
   6474 	struct ether_header *eh;
   6475 	int offset, iphl;
   6476 
   6477 	/*
   6478 	 * XXX It would be nice if the mbuf pkthdr had offset
   6479 	 * fields for the protocol headers.
   6480 	 */
   6481 	*cmdlenp = 0;
   6482 	*fieldsp = 0;
   6483 
   6484 	eh = mtod(m0, struct ether_header *);
   6485 	switch (htons(eh->ether_type)) {
   6486 	case ETHERTYPE_IP:
   6487 	case ETHERTYPE_IPV6:
   6488 		offset = ETHER_HDR_LEN;
   6489 		break;
   6490 
   6491 	case ETHERTYPE_VLAN:
   6492 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6493 		break;
   6494 
   6495 	default:
   6496 		/* Don't support this protocol or encapsulation. */
   6497 		*do_csum = false;
   6498 		return 0;
   6499 	}
   6500 	*do_csum = true;
   6501 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6502 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6503 
   6504 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6505 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6506 
   6507 	if ((m0->m_pkthdr.csum_flags &
   6508 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6509 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6510 	} else {
   6511 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6512 	}
   6513 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6514 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6515 
   6516 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6517 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6518 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6519 		*cmdlenp |= NQTX_CMD_VLE;
   6520 	}
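
	/*
	 * At this point vl_len is fully assembled.  For an untagged
	 * IPv4 packet it would be, illustratively (not compiled code):
	 *
	 *	vl_len = (ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT)
	 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
	 */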
   6521 
   6522 	mssidx = 0;
   6523 
   6524 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6525 		int hlen = offset + iphl;
   6526 		int tcp_hlen;
   6527 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6528 
   6529 		if (__predict_false(m0->m_len <
   6530 				    (hlen + sizeof(struct tcphdr)))) {
   6531 			/*
   6532 			 * TCP/IP headers are not in the first mbuf; we need
   6533 			 * to do this the slow and painful way.  Let's just
   6534 			 * hope this doesn't happen very often.
   6535 			 */
   6536 			struct tcphdr th;
   6537 
   6538 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6539 
   6540 			m_copydata(m0, hlen, sizeof(th), &th);
   6541 			if (v4) {
   6542 				struct ip ip;
   6543 
   6544 				m_copydata(m0, offset, sizeof(ip), &ip);
   6545 				ip.ip_len = 0;
   6546 				m_copyback(m0,
   6547 				    offset + offsetof(struct ip, ip_len),
   6548 				    sizeof(ip.ip_len), &ip.ip_len);
   6549 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6550 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6551 			} else {
   6552 				struct ip6_hdr ip6;
   6553 
   6554 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6555 				ip6.ip6_plen = 0;
   6556 				m_copyback(m0,
   6557 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6558 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6559 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6560 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6561 			}
   6562 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6563 			    sizeof(th.th_sum), &th.th_sum);
   6564 
   6565 			tcp_hlen = th.th_off << 2;
   6566 		} else {
   6567 			/*
   6568 			 * TCP/IP headers are in the first mbuf; we can do
   6569 			 * this the easy way.
   6570 			 */
   6571 			struct tcphdr *th;
   6572 
   6573 			if (v4) {
   6574 				struct ip *ip =
   6575 				    (void *)(mtod(m0, char *) + offset);
   6576 				th = (void *)(mtod(m0, char *) + hlen);
   6577 
   6578 				ip->ip_len = 0;
   6579 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6580 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6581 			} else {
   6582 				struct ip6_hdr *ip6 =
   6583 				    (void *)(mtod(m0, char *) + offset);
   6584 				th = (void *)(mtod(m0, char *) + hlen);
   6585 
   6586 				ip6->ip6_plen = 0;
   6587 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6588 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6589 			}
   6590 			tcp_hlen = th->th_off << 2;
   6591 		}
   6592 		hlen += tcp_hlen;
   6593 		*cmdlenp |= NQTX_CMD_TSE;
   6594 
   6595 		if (v4) {
   6596 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6597 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6598 		} else {
   6599 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6600 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6601 		}
   6602 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6603 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6604 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6605 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6606 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6607 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6608 	} else {
   6609 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6610 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6611 	}
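
	/*
	 * Illustration of the PAYLEN/MSS/L4LEN packing above for a
	 * hypothetical TSO send (not compiled code): a 9014-byte
	 * packet with 14-byte Ethernet, 20-byte IP and 20-byte TCP
	 * headers and an MSS of 1460 yields
	 *
	 *	*fieldsp |= (9014 - 54) << NQTXD_FIELDS_PAYLEN_SHIFT;
	 *	mssidx = (1460 << NQTXC_MSSIDX_MSS_SHIFT)
	 *	    | (20 << NQTXC_MSSIDX_L4LEN_SHIFT);
	 */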
   6612 
   6613 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6614 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6615 		cmdc |= NQTXC_CMD_IP4;
   6616 	}
   6617 
   6618 	if (m0->m_pkthdr.csum_flags &
   6619 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6620 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6621 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6622 			cmdc |= NQTXC_CMD_TCP;
   6623 		} else {
   6624 			cmdc |= NQTXC_CMD_UDP;
   6625 		}
   6626 		cmdc |= NQTXC_CMD_IP4;
   6627 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6628 	}
   6629 	if (m0->m_pkthdr.csum_flags &
   6630 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6631 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6632 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6633 			cmdc |= NQTXC_CMD_TCP;
   6634 		} else {
   6635 			cmdc |= NQTXC_CMD_UDP;
   6636 		}
   6637 		cmdc |= NQTXC_CMD_IP6;
   6638 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6639 	}
   6640 
   6641 	/* Fill in the context descriptor. */
   6642 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6643 	    htole32(vl_len);
   6644 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6645 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6646 	    htole32(cmdc);
   6647 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6648 	    htole32(mssidx);
   6649 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6650 	DPRINTF(WM_DEBUG_TX,
   6651 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6652 	    txq->txq_next, 0, vl_len));
   6653 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6654 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6655 	txs->txs_ndesc++;
   6656 	return 0;
   6657 }
   6658 
   6659 /*
   6660  * wm_nq_start:		[ifnet interface function]
   6661  *
   6662  *	Start packet transmission on the interface for NEWQUEUE devices
   6663  */
   6664 static void
   6665 wm_nq_start(struct ifnet *ifp)
   6666 {
   6667 	struct wm_softc *sc = ifp->if_softc;
   6668 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6669 
   6670 	WM_TX_LOCK(txq);
   6671 	if (!sc->sc_stopping)
   6672 		wm_nq_start_locked(ifp);
   6673 	WM_TX_UNLOCK(txq);
   6674 }
   6675 
   6676 static void
   6677 wm_nq_start_locked(struct ifnet *ifp)
   6678 {
   6679 	struct wm_softc *sc = ifp->if_softc;
   6680 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6681 	struct mbuf *m0;
   6682 	struct m_tag *mtag;
   6683 	struct wm_txsoft *txs;
   6684 	bus_dmamap_t dmamap;
   6685 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6686 	bool do_csum, sent;
   6687 
   6688 	KASSERT(WM_TX_LOCKED(txq));
   6689 
   6690 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6691 		return;
   6692 
   6693 	sent = false;
   6694 
   6695 	/*
   6696 	 * Loop through the send queue, setting up transmit descriptors
   6697 	 * until we drain the queue, or use up all available transmit
   6698 	 * descriptors.
   6699 	 */
   6700 	for (;;) {
   6701 		m0 = NULL;
   6702 
   6703 		/* Get a work queue entry. */
   6704 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6705 			wm_txeof(sc);
   6706 			if (txq->txq_sfree == 0) {
   6707 				DPRINTF(WM_DEBUG_TX,
   6708 				    ("%s: TX: no free job descriptors\n",
   6709 					device_xname(sc->sc_dev)));
   6710 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6711 				break;
   6712 			}
   6713 		}
   6714 
   6715 		/* Grab a packet off the queue. */
   6716 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6717 		if (m0 == NULL)
   6718 			break;
   6719 
   6720 		DPRINTF(WM_DEBUG_TX,
   6721 		    ("%s: TX: have packet to transmit: %p\n",
   6722 		    device_xname(sc->sc_dev), m0));
   6723 
   6724 		txs = &txq->txq_soft[txq->txq_snext];
   6725 		dmamap = txs->txs_dmamap;
   6726 
   6727 		/*
   6728 		 * Load the DMA map.  If this fails, the packet either
   6729 		 * didn't fit in the allotted number of segments, or we
   6730 		 * were short on resources.  For the too-many-segments
   6731 		 * case, we simply report an error and drop the packet,
   6732 		 * since we can't sanely copy a jumbo packet to a single
   6733 		 * buffer.
   6734 		 */
   6735 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6736 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6737 		if (error) {
   6738 			if (error == EFBIG) {
   6739 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6740 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6741 				    "DMA segments, dropping...\n",
   6742 				    device_xname(sc->sc_dev));
   6743 				wm_dump_mbuf_chain(sc, m0);
   6744 				m_freem(m0);
   6745 				continue;
   6746 			}
   6747 			/* Short on resources, just stop for now. */
   6748 			DPRINTF(WM_DEBUG_TX,
   6749 			    ("%s: TX: dmamap load failed: %d\n",
   6750 			    device_xname(sc->sc_dev), error));
   6751 			break;
   6752 		}
   6753 
   6754 		segs_needed = dmamap->dm_nsegs;
   6755 
   6756 		/*
   6757 		 * Ensure we have enough descriptors free to describe
   6758 		 * the packet.  Note, we always reserve one descriptor
   6759 		 * at the end of the ring due to the semantics of the
   6760 		 * TDT register, plus one more in the event we need
   6761 		 * to load offload context.
   6762 		 */
   6763 		if (segs_needed > txq->txq_free - 2) {
   6764 			/*
   6765 			 * Not enough free descriptors to transmit this
   6766 			 * packet.  We haven't committed anything yet,
   6767 			 * so just unload the DMA map, put the packet
    6768 			 * back on the queue, and punt.  Notify the upper
   6769 			 * layer that there are no more slots left.
   6770 			 */
   6771 			DPRINTF(WM_DEBUG_TX,
   6772 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6773 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6774 			    segs_needed, txq->txq_free - 1));
   6775 			ifp->if_flags |= IFF_OACTIVE;
   6776 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6777 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6778 			break;
   6779 		}
   6780 
   6781 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6782 
   6783 		DPRINTF(WM_DEBUG_TX,
   6784 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6785 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6786 
   6787 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6788 
   6789 		/*
   6790 		 * Store a pointer to the packet so that we can free it
   6791 		 * later.
   6792 		 *
   6793 		 * Initially, we consider the number of descriptors the
    6794 		 * packet uses to be the number of DMA segments.  This may be
   6795 		 * incremented by 1 if we do checksum offload (a descriptor
   6796 		 * is used to set the checksum context).
   6797 		 */
   6798 		txs->txs_mbuf = m0;
   6799 		txs->txs_firstdesc = txq->txq_next;
   6800 		txs->txs_ndesc = segs_needed;
   6801 
   6802 		/* Set up offload parameters for this packet. */
   6803 		uint32_t cmdlen, fields, dcmdlen;
   6804 		if (m0->m_pkthdr.csum_flags &
   6805 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6806 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6807 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6808 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6809 			    &do_csum) != 0) {
   6810 				/* Error message already displayed. */
   6811 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6812 				continue;
   6813 			}
   6814 		} else {
   6815 			do_csum = false;
   6816 			cmdlen = 0;
   6817 			fields = 0;
   6818 		}
   6819 
   6820 		/* Sync the DMA map. */
   6821 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6822 		    BUS_DMASYNC_PREWRITE);
   6823 
   6824 		/* Initialize the first transmit descriptor. */
   6825 		nexttx = txq->txq_next;
   6826 		if (!do_csum) {
   6827 			/* setup a legacy descriptor */
   6828 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6829 			    dmamap->dm_segs[0].ds_addr);
   6830 			txq->txq_descs[nexttx].wtx_cmdlen =
   6831 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6832 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6833 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6834 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6835 			    NULL) {
   6836 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6837 				    htole32(WTX_CMD_VLE);
   6838 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6839 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6840 			} else {
   6841 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6842 			}
   6843 			dcmdlen = 0;
   6844 		} else {
   6845 			/* setup an advanced data descriptor */
   6846 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6847 			    htole64(dmamap->dm_segs[0].ds_addr);
   6848 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6850 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6851 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6852 			    htole32(fields);
   6853 			DPRINTF(WM_DEBUG_TX,
   6854 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6855 			    device_xname(sc->sc_dev), nexttx,
   6856 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6857 			DPRINTF(WM_DEBUG_TX,
   6858 			    ("\t 0x%08x%08x\n", fields,
   6859 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6860 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6861 		}
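
		/*
		 * Note: dcmdlen is 0 for the legacy format, whose command
		 * bits were already written into wtx_cmdlen above, but
		 * NQTX_DTYP_D | NQTX_CMD_DEXT for the advanced format,
		 * where every data descriptor must carry the
		 * descriptor-type and extension bits.
		 */
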
   6862 
   6863 		lasttx = nexttx;
   6864 		nexttx = WM_NEXTTX(txq, nexttx);
   6865 		/*
    6866 		 * Fill in the next descriptors.  The legacy and advanced
    6867 		 * formats are the same from here on.
   6868 		 */
   6869 		for (seg = 1; seg < dmamap->dm_nsegs;
   6870 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6871 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6872 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6873 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6874 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6875 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6876 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6877 			lasttx = nexttx;
   6878 
   6879 			DPRINTF(WM_DEBUG_TX,
   6880 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6881 			     "len %#04zx\n",
   6882 			    device_xname(sc->sc_dev), nexttx,
   6883 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6884 			    dmamap->dm_segs[seg].ds_len));
   6885 		}
   6886 
   6887 		KASSERT(lasttx != -1);
   6888 
   6889 		/*
   6890 		 * Set up the command byte on the last descriptor of
   6891 		 * the packet.  If we're in the interrupt delay window,
   6892 		 * delay the interrupt.
   6893 		 */
   6894 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6895 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6896 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6897 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6898 
   6899 		txs->txs_lastdesc = lasttx;
   6900 
   6901 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6902 		    device_xname(sc->sc_dev),
   6903 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6904 
   6905 		/* Sync the descriptors we're using. */
   6906 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6907 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6908 
   6909 		/* Give the packet to the chip. */
   6910 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6911 		sent = true;
   6912 
   6913 		DPRINTF(WM_DEBUG_TX,
   6914 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6915 
   6916 		DPRINTF(WM_DEBUG_TX,
   6917 		    ("%s: TX: finished transmitting packet, job %d\n",
   6918 		    device_xname(sc->sc_dev), txq->txq_snext));
   6919 
   6920 		/* Advance the tx pointer. */
   6921 		txq->txq_free -= txs->txs_ndesc;
   6922 		txq->txq_next = nexttx;
   6923 
   6924 		txq->txq_sfree--;
   6925 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6926 
   6927 		/* Pass the packet to any BPF listeners. */
   6928 		bpf_mtap(ifp, m0);
   6929 	}
   6930 
   6931 	if (m0 != NULL) {
   6932 		ifp->if_flags |= IFF_OACTIVE;
   6933 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6934 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6935 			__func__));
   6936 		m_freem(m0);
   6937 	}
   6938 
   6939 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6940 		/* No more slots; notify upper layer. */
   6941 		ifp->if_flags |= IFF_OACTIVE;
   6942 	}
   6943 
   6944 	if (sent) {
   6945 		/* Set a watchdog timer in case the chip flakes out. */
   6946 		ifp->if_timer = 5;
   6947 	}
   6948 }
   6949 
   6950 /* Interrupt */
   6951 
   6952 /*
   6953  * wm_txeof:
   6954  *
   6955  *	Helper; handle transmit interrupts.
   6956  */
   6957 static int
   6958 wm_txeof(struct wm_softc *sc)
   6959 {
   6960 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6961 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6962 	struct wm_txsoft *txs;
   6963 	bool processed = false;
   6964 	int count = 0;
   6965 	int i;
   6966 	uint8_t status;
   6967 
   6968 	if (sc->sc_stopping)
   6969 		return 0;
   6970 
   6971 	ifp->if_flags &= ~IFF_OACTIVE;
   6972 
   6973 	/*
   6974 	 * Go through the Tx list and free mbufs for those
   6975 	 * frames which have been transmitted.
   6976 	 */
   6977 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6978 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6979 		txs = &txq->txq_soft[i];
   6980 
   6981 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   6982 			device_xname(sc->sc_dev), i));
   6983 
   6984 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6985 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   6986 
   6987 		status =
   6988 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6989 		if ((status & WTX_ST_DD) == 0) {
   6990 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6991 			    BUS_DMASYNC_PREREAD);
   6992 			break;
   6993 		}
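
		/*
		 * The PREREAD sync above hands the still-pending
		 * descriptor back to the device, so that its eventual
		 * DD write-back is visible to the next wm_txeof() pass.
		 */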
   6994 
   6995 		processed = true;
   6996 		count++;
   6997 		DPRINTF(WM_DEBUG_TX,
   6998 		    ("%s: TX: job %d done: descs %d..%d\n",
   6999 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7000 		    txs->txs_lastdesc));
   7001 
   7002 		/*
   7003 		 * XXX We should probably be using the statistics
   7004 		 * XXX registers, but I don't know if they exist
   7005 		 * XXX on chips before the i82544.
   7006 		 */
   7007 
   7008 #ifdef WM_EVENT_COUNTERS
   7009 		if (status & WTX_ST_TU)
   7010 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   7011 #endif /* WM_EVENT_COUNTERS */
   7012 
   7013 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7014 			ifp->if_oerrors++;
   7015 			if (status & WTX_ST_LC)
   7016 				log(LOG_WARNING, "%s: late collision\n",
   7017 				    device_xname(sc->sc_dev));
   7018 			else if (status & WTX_ST_EC) {
   7019 				ifp->if_collisions += 16;
   7020 				log(LOG_WARNING, "%s: excessive collisions\n",
   7021 				    device_xname(sc->sc_dev));
   7022 			}
   7023 		} else
   7024 			ifp->if_opackets++;
   7025 
   7026 		txq->txq_free += txs->txs_ndesc;
   7027 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7028 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7029 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7030 		m_freem(txs->txs_mbuf);
   7031 		txs->txs_mbuf = NULL;
   7032 	}
   7033 
   7034 	/* Update the dirty transmit buffer pointer. */
   7035 	txq->txq_sdirty = i;
   7036 	DPRINTF(WM_DEBUG_TX,
   7037 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7038 
   7039 	if (count != 0)
   7040 		rnd_add_uint32(&sc->rnd_source, count);
   7041 
   7042 	/*
   7043 	 * If there are no more pending transmissions, cancel the watchdog
   7044 	 * timer.
   7045 	 */
   7046 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7047 		ifp->if_timer = 0;
   7048 
   7049 	return processed;
   7050 }
   7051 
   7052 /*
   7053  * wm_rxeof:
   7054  *
   7055  *	Helper; handle receive interrupts.
   7056  */
   7057 static void
   7058 wm_rxeof(struct wm_rxqueue *rxq)
   7059 {
   7060 	struct wm_softc *sc = rxq->rxq_sc;
   7061 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7062 	struct wm_rxsoft *rxs;
   7063 	struct mbuf *m;
   7064 	int i, len;
   7065 	int count = 0;
   7066 	uint8_t status, errors;
   7067 	uint16_t vlantag;
   7068 
   7069 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7070 		rxs = &rxq->rxq_soft[i];
   7071 
   7072 		DPRINTF(WM_DEBUG_RX,
   7073 		    ("%s: RX: checking descriptor %d\n",
   7074 		    device_xname(sc->sc_dev), i));
   7075 
   7076 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7077 
   7078 		status = rxq->rxq_descs[i].wrx_status;
   7079 		errors = rxq->rxq_descs[i].wrx_errors;
   7080 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7081 		vlantag = rxq->rxq_descs[i].wrx_special;
   7082 
   7083 		if ((status & WRX_ST_DD) == 0) {
   7084 			/* We have processed all of the receive descriptors. */
   7085 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7086 			break;
   7087 		}
   7088 
   7089 		count++;
   7090 		if (__predict_false(rxq->rxq_discard)) {
   7091 			DPRINTF(WM_DEBUG_RX,
   7092 			    ("%s: RX: discarding contents of descriptor %d\n",
   7093 			    device_xname(sc->sc_dev), i));
   7094 			wm_init_rxdesc(rxq, i);
   7095 			if (status & WRX_ST_EOP) {
   7096 				/* Reset our state. */
   7097 				DPRINTF(WM_DEBUG_RX,
   7098 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7099 				    device_xname(sc->sc_dev)));
   7100 				rxq->rxq_discard = 0;
   7101 			}
   7102 			continue;
   7103 		}
   7104 
   7105 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7106 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7107 
   7108 		m = rxs->rxs_mbuf;
   7109 
   7110 		/*
   7111 		 * Add a new receive buffer to the ring, unless of
   7112 		 * course the length is zero. Treat the latter as a
   7113 		 * failed mapping.
   7114 		 */
   7115 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7116 			/*
   7117 			 * Failed, throw away what we've done so
   7118 			 * far, and discard the rest of the packet.
   7119 			 */
   7120 			ifp->if_ierrors++;
   7121 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7122 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7123 			wm_init_rxdesc(rxq, i);
   7124 			if ((status & WRX_ST_EOP) == 0)
   7125 				rxq->rxq_discard = 1;
   7126 			if (rxq->rxq_head != NULL)
   7127 				m_freem(rxq->rxq_head);
   7128 			WM_RXCHAIN_RESET(rxq);
   7129 			DPRINTF(WM_DEBUG_RX,
   7130 			    ("%s: RX: Rx buffer allocation failed, "
   7131 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7132 			    rxq->rxq_discard ? " (discard)" : ""));
   7133 			continue;
   7134 		}
   7135 
   7136 		m->m_len = len;
   7137 		rxq->rxq_len += len;
   7138 		DPRINTF(WM_DEBUG_RX,
   7139 		    ("%s: RX: buffer at %p len %d\n",
   7140 		    device_xname(sc->sc_dev), m->m_data, len));
   7141 
   7142 		/* If this is not the end of the packet, keep looking. */
   7143 		if ((status & WRX_ST_EOP) == 0) {
   7144 			WM_RXCHAIN_LINK(rxq, m);
   7145 			DPRINTF(WM_DEBUG_RX,
   7146 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7147 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7148 			continue;
   7149 		}
   7150 
    7151 		/*
    7152 		 * Okay, we have the entire packet now.  The chip is
    7153 		 * configured to include the FCS except on the I350, I354
    7154 		 * and I21[01] (not all chips can be configured to strip
    7155 		 * it), so we need to trim it.  We may also need to adjust
    7156 		 * the length of the previous mbuf in the chain if the
    7157 		 * current mbuf is too short.  Due to an erratum, the
    7158 		 * RCTL_SECRC bit in the RCTL register is always set on
    7159 		 * the I350, so we don't trim the FCS there.
    7160 		 */
   7161 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7162 		    && (sc->sc_type != WM_T_I210)
   7163 		    && (sc->sc_type != WM_T_I211)) {
   7164 			if (m->m_len < ETHER_CRC_LEN) {
   7165 				rxq->rxq_tail->m_len
   7166 				    -= (ETHER_CRC_LEN - m->m_len);
   7167 				m->m_len = 0;
   7168 			} else
   7169 				m->m_len -= ETHER_CRC_LEN;
   7170 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7171 		} else
   7172 			len = rxq->rxq_len;
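
		/*
		 * FCS trim example: a frame delivered as a 1516-byte
		 * buffer plus a 2-byte buffer ends with m->m_len == 2,
		 * which is less than ETHER_CRC_LEN, so the two remaining
		 * FCS bytes are shaved off the previous mbuf and the
		 * final mbuf's length drops to zero.
		 */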
   7173 
   7174 		WM_RXCHAIN_LINK(rxq, m);
   7175 
   7176 		*rxq->rxq_tailp = NULL;
   7177 		m = rxq->rxq_head;
   7178 
   7179 		WM_RXCHAIN_RESET(rxq);
   7180 
   7181 		DPRINTF(WM_DEBUG_RX,
   7182 		    ("%s: RX: have entire packet, len -> %d\n",
   7183 		    device_xname(sc->sc_dev), len));
   7184 
   7185 		/* If an error occurred, update stats and drop the packet. */
   7186 		if (errors &
   7187 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7188 			if (errors & WRX_ER_SE)
   7189 				log(LOG_WARNING, "%s: symbol error\n",
   7190 				    device_xname(sc->sc_dev));
   7191 			else if (errors & WRX_ER_SEQ)
   7192 				log(LOG_WARNING, "%s: receive sequence error\n",
   7193 				    device_xname(sc->sc_dev));
   7194 			else if (errors & WRX_ER_CE)
   7195 				log(LOG_WARNING, "%s: CRC error\n",
   7196 				    device_xname(sc->sc_dev));
   7197 			m_freem(m);
   7198 			continue;
   7199 		}
   7200 
   7201 		/* No errors.  Receive the packet. */
   7202 		m->m_pkthdr.rcvif = ifp;
   7203 		m->m_pkthdr.len = len;
   7204 
   7205 		/*
   7206 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7207 		 * for us.  Associate the tag with the packet.
   7208 		 */
   7209 		/* XXXX should check for i350 and i354 */
   7210 		if ((status & WRX_ST_VP) != 0) {
   7211 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7212 		}
   7213 
   7214 		/* Set up checksum info for this packet. */
   7215 		if ((status & WRX_ST_IXSM) == 0) {
   7216 			if (status & WRX_ST_IPCS) {
   7217 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7218 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7219 				if (errors & WRX_ER_IPE)
   7220 					m->m_pkthdr.csum_flags |=
   7221 					    M_CSUM_IPv4_BAD;
   7222 			}
   7223 			if (status & WRX_ST_TCPCS) {
   7224 				/*
   7225 				 * Note: we don't know if this was TCP or UDP,
   7226 				 * so we just set both bits, and expect the
   7227 				 * upper layers to deal.
   7228 				 */
   7229 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7230 				m->m_pkthdr.csum_flags |=
   7231 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7232 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7233 				if (errors & WRX_ER_TCPE)
   7234 					m->m_pkthdr.csum_flags |=
   7235 					    M_CSUM_TCP_UDP_BAD;
   7236 			}
   7237 		}
   7238 
   7239 		ifp->if_ipackets++;
   7240 
   7241 		WM_RX_UNLOCK(rxq);
   7242 
   7243 		/* Pass this up to any BPF listeners. */
   7244 		bpf_mtap(ifp, m);
   7245 
   7246 		/* Pass it on. */
   7247 		if_percpuq_enqueue(sc->sc_ipq, m);
   7248 
   7249 		WM_RX_LOCK(rxq);
   7250 
   7251 		if (sc->sc_stopping)
   7252 			break;
   7253 	}
   7254 
   7255 	/* Update the receive pointer. */
   7256 	rxq->rxq_ptr = i;
   7257 	if (count != 0)
   7258 		rnd_add_uint32(&sc->rnd_source, count);
   7259 
   7260 	DPRINTF(WM_DEBUG_RX,
   7261 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7262 }
   7263 
   7264 /*
   7265  * wm_linkintr_gmii:
   7266  *
   7267  *	Helper; handle link interrupts for GMII.
   7268  */
   7269 static void
   7270 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7271 {
   7272 
   7273 	KASSERT(WM_CORE_LOCKED(sc));
   7274 
   7275 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7276 		__func__));
   7277 
   7278 	if (icr & ICR_LSC) {
   7279 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7280 
   7281 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7282 			wm_gig_downshift_workaround_ich8lan(sc);
   7283 
   7284 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7285 			device_xname(sc->sc_dev)));
   7286 		mii_pollstat(&sc->sc_mii);
   7287 		if (sc->sc_type == WM_T_82543) {
   7288 			int miistatus, active;
   7289 
   7290 			/*
   7291 			 * With 82543, we need to force speed and
   7292 			 * duplex on the MAC equal to what the PHY
   7293 			 * speed and duplex configuration is.
   7294 			 */
   7295 			miistatus = sc->sc_mii.mii_media_status;
   7296 
   7297 			if (miistatus & IFM_ACTIVE) {
   7298 				active = sc->sc_mii.mii_media_active;
   7299 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7300 				switch (IFM_SUBTYPE(active)) {
   7301 				case IFM_10_T:
   7302 					sc->sc_ctrl |= CTRL_SPEED_10;
   7303 					break;
   7304 				case IFM_100_TX:
   7305 					sc->sc_ctrl |= CTRL_SPEED_100;
   7306 					break;
   7307 				case IFM_1000_T:
   7308 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7309 					break;
   7310 				default:
   7311 					/*
    7312 					 * Fiber?
    7313 					 * Should never get here.
   7314 					 */
   7315 					printf("unknown media (%x)\n", active);
   7316 					break;
   7317 				}
   7318 				if (active & IFM_FDX)
   7319 					sc->sc_ctrl |= CTRL_FD;
   7320 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7321 			}
   7322 		} else if ((sc->sc_type == WM_T_ICH8)
   7323 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7324 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7325 		} else if (sc->sc_type == WM_T_PCH) {
   7326 			wm_k1_gig_workaround_hv(sc,
   7327 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7328 		}
   7329 
   7330 		if ((sc->sc_phytype == WMPHY_82578)
   7331 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7332 			== IFM_1000_T)) {
   7333 
   7334 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7335 				delay(200*1000); /* XXX too big */
   7336 
   7337 				/* Link stall fix for link up */
   7338 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7339 				    HV_MUX_DATA_CTRL,
   7340 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7341 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7342 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7343 				    HV_MUX_DATA_CTRL,
   7344 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7345 			}
   7346 		}
   7347 	} else if (icr & ICR_RXSEQ) {
   7348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7349 			device_xname(sc->sc_dev)));
   7350 	}
   7351 }
   7352 
   7353 /*
   7354  * wm_linkintr_tbi:
   7355  *
   7356  *	Helper; handle link interrupts for TBI mode.
   7357  */
   7358 static void
   7359 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7360 {
   7361 	uint32_t status;
   7362 
   7363 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7364 		__func__));
   7365 
   7366 	status = CSR_READ(sc, WMREG_STATUS);
   7367 	if (icr & ICR_LSC) {
   7368 		if (status & STATUS_LU) {
   7369 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7370 			    device_xname(sc->sc_dev),
   7371 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7372 			/*
   7373 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7374 			 * so we should update sc->sc_ctrl
   7375 			 */
   7376 
   7377 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7378 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7379 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7380 			if (status & STATUS_FD)
   7381 				sc->sc_tctl |=
   7382 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7383 			else
   7384 				sc->sc_tctl |=
   7385 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7386 			if (sc->sc_ctrl & CTRL_TFCE)
   7387 				sc->sc_fcrtl |= FCRTL_XONE;
   7388 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7389 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7390 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7391 				      sc->sc_fcrtl);
   7392 			sc->sc_tbi_linkup = 1;
   7393 		} else {
   7394 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7395 			    device_xname(sc->sc_dev)));
   7396 			sc->sc_tbi_linkup = 0;
   7397 		}
   7398 		/* Update LED */
   7399 		wm_tbi_serdes_set_linkled(sc);
   7400 	} else if (icr & ICR_RXSEQ) {
   7401 		DPRINTF(WM_DEBUG_LINK,
   7402 		    ("%s: LINK: Receive sequence error\n",
   7403 		    device_xname(sc->sc_dev)));
   7404 	}
   7405 }
   7406 
   7407 /*
   7408  * wm_linkintr_serdes:
   7409  *
    7410  *	Helper; handle link interrupts for SERDES mode.
   7411  */
   7412 static void
   7413 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7414 {
   7415 	struct mii_data *mii = &sc->sc_mii;
   7416 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7417 	uint32_t pcs_adv, pcs_lpab, reg;
   7418 
   7419 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7420 		__func__));
   7421 
   7422 	if (icr & ICR_LSC) {
   7423 		/* Check PCS */
   7424 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7425 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7426 			mii->mii_media_status |= IFM_ACTIVE;
   7427 			sc->sc_tbi_linkup = 1;
   7428 		} else {
   7429 			mii->mii_media_status |= IFM_NONE;
   7430 			sc->sc_tbi_linkup = 0;
   7431 			wm_tbi_serdes_set_linkled(sc);
   7432 			return;
   7433 		}
   7434 		mii->mii_media_active |= IFM_1000_SX;
   7435 		if ((reg & PCS_LSTS_FDX) != 0)
   7436 			mii->mii_media_active |= IFM_FDX;
   7437 		else
   7438 			mii->mii_media_active |= IFM_HDX;
   7439 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7440 			/* Check flow */
   7441 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7442 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7443 				DPRINTF(WM_DEBUG_LINK,
   7444 				    ("XXX LINKOK but not ACOMP\n"));
   7445 				return;
   7446 			}
   7447 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7448 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7449 			DPRINTF(WM_DEBUG_LINK,
   7450 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7451 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7452 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7453 				mii->mii_media_active |= IFM_FLOW
   7454 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7455 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7456 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7457 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7458 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7459 				mii->mii_media_active |= IFM_FLOW
   7460 				    | IFM_ETH_TXPAUSE;
   7461 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7462 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7463 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7464 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7465 				mii->mii_media_active |= IFM_FLOW
   7466 				    | IFM_ETH_RXPAUSE;
   7467 		}
   7468 		/* Update LED */
   7469 		wm_tbi_serdes_set_linkled(sc);
   7470 	} else {
   7471 		DPRINTF(WM_DEBUG_LINK,
   7472 		    ("%s: LINK: Receive sequence error\n",
   7473 		    device_xname(sc->sc_dev)));
   7474 	}
   7475 }
   7476 
   7477 /*
   7478  * wm_linkintr:
   7479  *
   7480  *	Helper; handle link interrupts.
   7481  */
   7482 static void
   7483 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7484 {
   7485 
   7486 	KASSERT(WM_CORE_LOCKED(sc));
   7487 
   7488 	if (sc->sc_flags & WM_F_HAS_MII)
   7489 		wm_linkintr_gmii(sc, icr);
   7490 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7491 	    && (sc->sc_type >= WM_T_82575))
   7492 		wm_linkintr_serdes(sc, icr);
   7493 	else
   7494 		wm_linkintr_tbi(sc, icr);
   7495 }
   7496 
   7497 /*
   7498  * wm_intr_legacy:
   7499  *
   7500  *	Interrupt service routine for INTx and MSI.
   7501  */
   7502 static int
   7503 wm_intr_legacy(void *arg)
   7504 {
   7505 	struct wm_softc *sc = arg;
   7506 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7507 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7508 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7509 	uint32_t icr, rndval = 0;
   7510 	int handled = 0;
   7511 
   7512 	DPRINTF(WM_DEBUG_TX,
   7513 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7514 	while (1 /* CONSTCOND */) {
   7515 		icr = CSR_READ(sc, WMREG_ICR);
   7516 		if ((icr & sc->sc_icr) == 0)
   7517 			break;
   7518 		if (rndval == 0)
   7519 			rndval = icr;
   7520 
   7521 		WM_RX_LOCK(rxq);
   7522 
   7523 		if (sc->sc_stopping) {
   7524 			WM_RX_UNLOCK(rxq);
   7525 			break;
   7526 		}
   7527 
   7528 		handled = 1;
   7529 
   7530 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7531 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7532 			DPRINTF(WM_DEBUG_RX,
   7533 			    ("%s: RX: got Rx intr 0x%08x\n",
   7534 			    device_xname(sc->sc_dev),
   7535 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7536 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7537 		}
   7538 #endif
   7539 		wm_rxeof(rxq);
   7540 
   7541 		WM_RX_UNLOCK(rxq);
   7542 		WM_TX_LOCK(txq);
   7543 
   7544 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7545 		if (icr & ICR_TXDW) {
   7546 			DPRINTF(WM_DEBUG_TX,
   7547 			    ("%s: TX: got TXDW interrupt\n",
   7548 			    device_xname(sc->sc_dev)));
   7549 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7550 		}
   7551 #endif
   7552 		wm_txeof(sc);
   7553 
   7554 		WM_TX_UNLOCK(txq);
   7555 		WM_CORE_LOCK(sc);
   7556 
   7557 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7558 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7559 			wm_linkintr(sc, icr);
   7560 		}
   7561 
   7562 		WM_CORE_UNLOCK(sc);
   7563 
   7564 		if (icr & ICR_RXO) {
   7565 #if defined(WM_DEBUG)
   7566 			log(LOG_WARNING, "%s: Receive overrun\n",
   7567 			    device_xname(sc->sc_dev));
   7568 #endif /* defined(WM_DEBUG) */
   7569 		}
   7570 	}
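
	/*
	 * Note: reading ICR acknowledges (auto-clears) the asserted
	 * interrupt causes, so the loop above terminates once no cause
	 * in sc_icr remains set; rndval latches the first nonzero ICR
	 * value read as an entropy sample.
	 */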
   7571 
   7572 	rnd_add_uint32(&sc->rnd_source, rndval);
   7573 
   7574 	if (handled) {
   7575 		/* Try to get more packets going. */
   7576 		ifp->if_start(ifp);
   7577 	}
   7578 
   7579 	return handled;
   7580 }
   7581 
   7582 /*
   7583  * wm_txintr_msix:
   7584  *
   7585  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7586  */
   7587 static int
   7588 wm_txintr_msix(void *arg)
   7589 {
   7590 	struct wm_txqueue *txq = arg;
   7591 	struct wm_softc *sc = txq->txq_sc;
   7592 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7593 	int handled = 0;
   7594 
   7595 	DPRINTF(WM_DEBUG_TX,
   7596 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7597 
   7598 	if (sc->sc_type == WM_T_82574)
   7599 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
   7600 	else if (sc->sc_type == WM_T_82575)
   7601 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7602 	else
   7603 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7604 
   7605 	WM_TX_LOCK(txq);
   7606 
   7607 	if (sc->sc_stopping)
   7608 		goto out;
   7609 
   7610 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7611 	handled = wm_txeof(sc);
   7612 
   7613 out:
   7614 	WM_TX_UNLOCK(txq);
   7615 
   7616 	if (sc->sc_type == WM_T_82574)
   7617 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
   7618 	else if (sc->sc_type == WM_T_82575)
   7619 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7620 	else
   7621 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7622 
   7623 	if (handled) {
   7624 		/* Try to get more packets going. */
   7625 		ifp->if_start(ifp);
   7626 	}
   7627 
   7628 	return handled;
   7629 }
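
/*
 * Note the masking pattern shared by wm_txintr_msix() and
 * wm_rxintr_msix(): the queue's interrupt is disabled (IMC/EIMC)
 * before the handler body runs under the queue lock and re-enabled
 * (IMS/EIMS) afterwards, so the same queue's interrupt cannot fire
 * again while it is being serviced.
 */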
   7630 
   7631 /*
   7632  * wm_rxintr_msix:
   7633  *
   7634  *	Interrupt service routine for RX interrupt for MSI-X.
   7635  */
   7636 static int
   7637 wm_rxintr_msix(void *arg)
   7638 {
   7639 	struct wm_rxqueue *rxq = arg;
   7640 	struct wm_softc *sc = rxq->rxq_sc;
   7641 
   7642 	DPRINTF(WM_DEBUG_RX,
   7643 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7644 
   7645 	if (sc->sc_type == WM_T_82574)
   7646 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
   7647 	else if (sc->sc_type == WM_T_82575)
   7648 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7649 	else
   7650 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7651 
   7652 	WM_RX_LOCK(rxq);
   7653 
   7654 	if (sc->sc_stopping)
   7655 		goto out;
   7656 
   7657 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7658 	wm_rxeof(rxq);
   7659 
   7660 out:
   7661 	WM_RX_UNLOCK(rxq);
   7662 
   7663 	if (sc->sc_type == WM_T_82574)
   7664 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7665 	else if (sc->sc_type == WM_T_82575)
   7666 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7667 	else
   7668 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7669 
   7670 	return 1;
   7671 }
   7672 
   7673 /*
   7674  * wm_linkintr_msix:
   7675  *
   7676  *	Interrupt service routine for link status change for MSI-X.
   7677  */
   7678 static int
   7679 wm_linkintr_msix(void *arg)
   7680 {
   7681 	struct wm_softc *sc = arg;
   7682 	uint32_t reg;
   7683 
   7684 	DPRINTF(WM_DEBUG_LINK,
   7685 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7686 
   7687 	reg = CSR_READ(sc, WMREG_ICR);
   7688 	WM_CORE_LOCK(sc);
   7689 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7690 		goto out;
   7691 
   7692 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7693 	wm_linkintr(sc, ICR_LSC);
   7694 
   7695 out:
   7696 	WM_CORE_UNLOCK(sc);
   7697 
   7698 	if (sc->sc_type == WM_T_82574)
   7699 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7700 	else if (sc->sc_type == WM_T_82575)
   7701 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7702 	else
   7703 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7704 
   7705 	return 1;
   7706 }
   7707 
   7708 /*
   7709  * Media related.
   7710  * GMII, SGMII, TBI (and SERDES)
   7711  */
   7712 
   7713 /* Common */
   7714 
   7715 /*
   7716  * wm_tbi_serdes_set_linkled:
   7717  *
   7718  *	Update the link LED on TBI and SERDES devices.
   7719  */
   7720 static void
   7721 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7722 {
   7723 
   7724 	if (sc->sc_tbi_linkup)
   7725 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7726 	else
   7727 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7728 
   7729 	/* 82540 or newer devices are active low */
   7730 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7731 
   7732 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7733 }
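
/*
 * Polarity example for the XOR above: on an 82540 or newer device
 * with link up, the first test sets CTRL_SWDPIN(0) and the XOR then
 * clears it again, driving the active-low LED pin low (LED on).
 */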
   7734 
   7735 /* GMII related */
   7736 
   7737 /*
   7738  * wm_gmii_reset:
   7739  *
   7740  *	Reset the PHY.
   7741  */
   7742 static void
   7743 wm_gmii_reset(struct wm_softc *sc)
   7744 {
   7745 	uint32_t reg;
   7746 	int rv;
   7747 
   7748 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7749 		device_xname(sc->sc_dev), __func__));
   7750 	/* get phy semaphore */
   7751 	switch (sc->sc_type) {
   7752 	case WM_T_82571:
   7753 	case WM_T_82572:
   7754 	case WM_T_82573:
   7755 	case WM_T_82574:
   7756 	case WM_T_82583:
   7757 		 /* XXX should get sw semaphore, too */
   7758 		rv = wm_get_swsm_semaphore(sc);
   7759 		break;
   7760 	case WM_T_82575:
   7761 	case WM_T_82576:
   7762 	case WM_T_82580:
   7763 	case WM_T_I350:
   7764 	case WM_T_I354:
   7765 	case WM_T_I210:
   7766 	case WM_T_I211:
   7767 	case WM_T_80003:
   7768 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7769 		break;
   7770 	case WM_T_ICH8:
   7771 	case WM_T_ICH9:
   7772 	case WM_T_ICH10:
   7773 	case WM_T_PCH:
   7774 	case WM_T_PCH2:
   7775 	case WM_T_PCH_LPT:
   7776 	case WM_T_PCH_SPT:
   7777 		rv = wm_get_swfwhw_semaphore(sc);
   7778 		break;
   7779 	default:
   7780 		/* nothing to do*/
   7781 		rv = 0;
   7782 		break;
   7783 	}
   7784 	if (rv != 0) {
   7785 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7786 		    __func__);
   7787 		return;
   7788 	}
   7789 
   7790 	switch (sc->sc_type) {
   7791 	case WM_T_82542_2_0:
   7792 	case WM_T_82542_2_1:
   7793 		/* null */
   7794 		break;
   7795 	case WM_T_82543:
   7796 		/*
   7797 		 * With 82543, we need to force speed and duplex on the MAC
   7798 		 * equal to what the PHY speed and duplex configuration is.
   7799 		 * In addition, we need to perform a hardware reset on the PHY
   7800 		 * to take it out of reset.
   7801 		 */
   7802 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7803 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7804 
   7805 		/* The PHY reset pin is active-low. */
   7806 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7807 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7808 		    CTRL_EXT_SWDPIN(4));
   7809 		reg |= CTRL_EXT_SWDPIO(4);
   7810 
   7811 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7812 		CSR_WRITE_FLUSH(sc);
   7813 		delay(10*1000);
   7814 
   7815 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7816 		CSR_WRITE_FLUSH(sc);
   7817 		delay(150);
   7818 #if 0
   7819 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7820 #endif
   7821 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7822 		break;
   7823 	case WM_T_82544:	/* reset 10000us */
   7824 	case WM_T_82540:
   7825 	case WM_T_82545:
   7826 	case WM_T_82545_3:
   7827 	case WM_T_82546:
   7828 	case WM_T_82546_3:
   7829 	case WM_T_82541:
   7830 	case WM_T_82541_2:
   7831 	case WM_T_82547:
   7832 	case WM_T_82547_2:
   7833 	case WM_T_82571:	/* reset 100us */
   7834 	case WM_T_82572:
   7835 	case WM_T_82573:
   7836 	case WM_T_82574:
   7837 	case WM_T_82575:
   7838 	case WM_T_82576:
   7839 	case WM_T_82580:
   7840 	case WM_T_I350:
   7841 	case WM_T_I354:
   7842 	case WM_T_I210:
   7843 	case WM_T_I211:
   7844 	case WM_T_82583:
   7845 	case WM_T_80003:
   7846 		/* generic reset */
   7847 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7848 		CSR_WRITE_FLUSH(sc);
   7849 		delay(20000);
   7850 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7851 		CSR_WRITE_FLUSH(sc);
   7852 		delay(20000);
   7853 
   7854 		if ((sc->sc_type == WM_T_82541)
   7855 		    || (sc->sc_type == WM_T_82541_2)
   7856 		    || (sc->sc_type == WM_T_82547)
   7857 		    || (sc->sc_type == WM_T_82547_2)) {
    7858 			/* workarounds for IGP are done in igp_reset() */
   7859 			/* XXX add code to set LED after phy reset */
   7860 		}
   7861 		break;
   7862 	case WM_T_ICH8:
   7863 	case WM_T_ICH9:
   7864 	case WM_T_ICH10:
   7865 	case WM_T_PCH:
   7866 	case WM_T_PCH2:
   7867 	case WM_T_PCH_LPT:
   7868 	case WM_T_PCH_SPT:
   7869 		/* generic reset */
   7870 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7871 		CSR_WRITE_FLUSH(sc);
   7872 		delay(100);
   7873 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7874 		CSR_WRITE_FLUSH(sc);
   7875 		delay(150);
   7876 		break;
   7877 	default:
   7878 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7879 		    __func__);
   7880 		break;
   7881 	}
   7882 
   7883 	/* release PHY semaphore */
   7884 	switch (sc->sc_type) {
   7885 	case WM_T_82571:
   7886 	case WM_T_82572:
   7887 	case WM_T_82573:
   7888 	case WM_T_82574:
   7889 	case WM_T_82583:
   7890 		 /* XXX should put sw semaphore, too */
   7891 		wm_put_swsm_semaphore(sc);
   7892 		break;
   7893 	case WM_T_82575:
   7894 	case WM_T_82576:
   7895 	case WM_T_82580:
   7896 	case WM_T_I350:
   7897 	case WM_T_I354:
   7898 	case WM_T_I210:
   7899 	case WM_T_I211:
   7900 	case WM_T_80003:
   7901 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7902 		break;
   7903 	case WM_T_ICH8:
   7904 	case WM_T_ICH9:
   7905 	case WM_T_ICH10:
   7906 	case WM_T_PCH:
   7907 	case WM_T_PCH2:
   7908 	case WM_T_PCH_LPT:
   7909 	case WM_T_PCH_SPT:
   7910 		wm_put_swfwhw_semaphore(sc);
   7911 		break;
   7912 	default:
   7913 		/* nothing to do */
   7914 		rv = 0;
   7915 		break;
   7916 	}
   7917 
   7918 	/* get_cfg_done */
   7919 	wm_get_cfg_done(sc);
   7920 
   7921 	/* extra setup */
   7922 	switch (sc->sc_type) {
   7923 	case WM_T_82542_2_0:
   7924 	case WM_T_82542_2_1:
   7925 	case WM_T_82543:
   7926 	case WM_T_82544:
   7927 	case WM_T_82540:
   7928 	case WM_T_82545:
   7929 	case WM_T_82545_3:
   7930 	case WM_T_82546:
   7931 	case WM_T_82546_3:
   7932 	case WM_T_82541_2:
   7933 	case WM_T_82547_2:
   7934 	case WM_T_82571:
   7935 	case WM_T_82572:
   7936 	case WM_T_82573:
   7937 	case WM_T_82575:
   7938 	case WM_T_82576:
   7939 	case WM_T_82580:
   7940 	case WM_T_I350:
   7941 	case WM_T_I354:
   7942 	case WM_T_I210:
   7943 	case WM_T_I211:
   7944 	case WM_T_80003:
   7945 		/* null */
   7946 		break;
   7947 	case WM_T_82574:
   7948 	case WM_T_82583:
   7949 		wm_lplu_d0_disable(sc);
   7950 		break;
   7951 	case WM_T_82541:
   7952 	case WM_T_82547:
    7953 		/* XXX Actively configure the LED after PHY reset */
   7954 		break;
   7955 	case WM_T_ICH8:
   7956 	case WM_T_ICH9:
   7957 	case WM_T_ICH10:
   7958 	case WM_T_PCH:
   7959 	case WM_T_PCH2:
   7960 	case WM_T_PCH_LPT:
   7961 	case WM_T_PCH_SPT:
    7962 		/* Allow time for h/w to reach a quiescent state after reset */
   7963 		delay(10*1000);
   7964 
   7965 		if (sc->sc_type == WM_T_PCH)
   7966 			wm_hv_phy_workaround_ich8lan(sc);
   7967 
   7968 		if (sc->sc_type == WM_T_PCH2)
   7969 			wm_lv_phy_workaround_ich8lan(sc);
   7970 
   7971 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7972 			/*
   7973 			 * dummy read to clear the phy wakeup bit after lcd
   7974 			 * reset
   7975 			 */
   7976 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7977 		}
   7978 
   7979 		/*
    7980 		 * XXX Configure the LCD with the extended configuration region
   7981 		 * in NVM
   7982 		 */
   7983 
   7984 		/* Disable D0 LPLU. */
   7985 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7986 			wm_lplu_d0_disable_pch(sc);
   7987 		else
   7988 			wm_lplu_d0_disable(sc);	/* ICH* */
   7989 		break;
   7990 	default:
   7991 		panic("%s: unknown type\n", __func__);
   7992 		break;
   7993 	}
   7994 }
   7995 
   7996 /*
   7997  * wm_get_phy_id_82575:
   7998  *
    7999  * Return the PHY ID, or -1 on failure.
   8000  */
   8001 static int
   8002 wm_get_phy_id_82575(struct wm_softc *sc)
   8003 {
   8004 	uint32_t reg;
   8005 	int phyid = -1;
   8006 
   8007 	/* XXX */
   8008 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8009 		return -1;
   8010 
   8011 	if (wm_sgmii_uses_mdio(sc)) {
   8012 		switch (sc->sc_type) {
   8013 		case WM_T_82575:
   8014 		case WM_T_82576:
   8015 			reg = CSR_READ(sc, WMREG_MDIC);
   8016 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8017 			break;
   8018 		case WM_T_82580:
   8019 		case WM_T_I350:
   8020 		case WM_T_I354:
   8021 		case WM_T_I210:
   8022 		case WM_T_I211:
   8023 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8024 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8025 			break;
   8026 		default:
   8027 			return -1;
   8028 		}
   8029 	}
   8030 
   8031 	return phyid;
   8032 }
   8033 
   8034 
   8035 /*
   8036  * wm_gmii_mediainit:
   8037  *
   8038  *	Initialize media for use on 1000BASE-T devices.
   8039  */
   8040 static void
   8041 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8042 {
   8043 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8044 	struct mii_data *mii = &sc->sc_mii;
   8045 	uint32_t reg;
   8046 
   8047 	/* We have GMII. */
   8048 	sc->sc_flags |= WM_F_HAS_MII;
   8049 
   8050 	if (sc->sc_type == WM_T_80003)
   8051 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8052 	else
   8053 		sc->sc_tipg = TIPG_1000T_DFLT;
   8054 
   8055 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8056 	if ((sc->sc_type == WM_T_82580)
   8057 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8058 	    || (sc->sc_type == WM_T_I211)) {
   8059 		reg = CSR_READ(sc, WMREG_PHPM);
   8060 		reg &= ~PHPM_GO_LINK_D;
   8061 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8062 	}
   8063 
   8064 	/*
   8065 	 * Let the chip set speed/duplex on its own based on
   8066 	 * signals from the PHY.
   8067 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8068 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8069 	 */
   8070 	sc->sc_ctrl |= CTRL_SLU;
   8071 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8072 
   8073 	/* Initialize our media structures and probe the GMII. */
   8074 	mii->mii_ifp = ifp;
   8075 
   8076 	/*
   8077 	 * Determine the PHY access method.
   8078 	 *
   8079 	 *  For SGMII, use SGMII specific method.
   8080 	 *
   8081 	 *  For some devices, we can determine the PHY access method
   8082 	 * from sc_type.
   8083 	 *
    8084 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8085 	 * access method from sc_type alone, so use the PCI product ID for
    8086 	 * some devices.
    8087 	 * For other ICH8 variants, try igp's method first. If the PHY
    8088 	 * can't be detected that way, fall back to bm's method.
   8089 	 */
   8090 	switch (prodid) {
   8091 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8092 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8093 		/* 82577 */
   8094 		sc->sc_phytype = WMPHY_82577;
   8095 		break;
   8096 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8097 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8098 		/* 82578 */
   8099 		sc->sc_phytype = WMPHY_82578;
   8100 		break;
   8101 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8102 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8103 		/* 82579 */
   8104 		sc->sc_phytype = WMPHY_82579;
   8105 		break;
   8106 	case PCI_PRODUCT_INTEL_82801I_BM:
   8107 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8108 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8109 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8110 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8111 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8112 		/* 82567 */
   8113 		sc->sc_phytype = WMPHY_BM;
   8114 		mii->mii_readreg = wm_gmii_bm_readreg;
   8115 		mii->mii_writereg = wm_gmii_bm_writereg;
   8116 		break;
   8117 	default:
   8118 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    8119 	    && !wm_sgmii_uses_mdio(sc)) {
   8120 			/* SGMII */
   8121 			mii->mii_readreg = wm_sgmii_readreg;
   8122 			mii->mii_writereg = wm_sgmii_writereg;
   8123 		} else if (sc->sc_type >= WM_T_80003) {
   8124 			/* 80003 */
   8125 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8126 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8127 		} else if (sc->sc_type >= WM_T_I210) {
   8128 			/* I210 and I211 */
   8129 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8130 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8131 		} else if (sc->sc_type >= WM_T_82580) {
   8132 			/* 82580, I350 and I354 */
   8133 			sc->sc_phytype = WMPHY_82580;
   8134 			mii->mii_readreg = wm_gmii_82580_readreg;
   8135 			mii->mii_writereg = wm_gmii_82580_writereg;
   8136 		} else if (sc->sc_type >= WM_T_82544) {
    8137 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8138 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8139 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8140 		} else {
   8141 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8142 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8143 		}
   8144 		break;
   8145 	}
   8146 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8147 		/* All PCH* use _hv_ */
   8148 		mii->mii_readreg = wm_gmii_hv_readreg;
   8149 		mii->mii_writereg = wm_gmii_hv_writereg;
   8150 	}
   8151 	mii->mii_statchg = wm_gmii_statchg;
   8152 
   8153 	wm_gmii_reset(sc);
   8154 
   8155 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8156 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8157 	    wm_gmii_mediastatus);
   8158 
   8159 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8160 	    || (sc->sc_type == WM_T_82580)
   8161 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8162 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8163 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8164 			/* Attach only one port */
   8165 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8166 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8167 		} else {
   8168 			int i, id;
   8169 			uint32_t ctrl_ext;
   8170 
   8171 			id = wm_get_phy_id_82575(sc);
   8172 			if (id != -1) {
   8173 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8174 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8175 			}
   8176 			if ((id == -1)
   8177 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8178 				/* Power on the SGMII PHY if it is disabled */
   8179 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8180 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8181 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8182 				CSR_WRITE_FLUSH(sc);
   8183 				delay(300*1000); /* XXX too long */
   8184 
    8185 				/* Try PHY addresses 1 through 7 */
   8186 				for (i = 1; i < 8; i++)
   8187 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8188 					    0xffffffff, i, MII_OFFSET_ANY,
   8189 					    MIIF_DOPAUSE);
   8190 
    8191 				/* Restore the previous SFP cage power state */
   8192 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8193 			}
   8194 		}
   8195 	} else {
   8196 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8197 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8198 	}
   8199 
   8200 	/*
   8201 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8202 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8203 	 */
   8204 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8205 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8206 		wm_set_mdio_slow_mode_hv(sc);
   8207 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8208 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8209 	}
   8210 
   8211 	/*
   8212 	 * (For ICH8 variants)
   8213 	 * If PHY detection failed, use BM's r/w function and retry.
   8214 	 */
   8215 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8216 		/* if failed, retry with *_bm_* */
   8217 		mii->mii_readreg = wm_gmii_bm_readreg;
   8218 		mii->mii_writereg = wm_gmii_bm_writereg;
   8219 
   8220 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8221 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8222 	}
   8223 
   8224 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8225 		/* No PHY was found */
   8226 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8227 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8228 		sc->sc_phytype = WMPHY_NONE;
   8229 	} else {
   8230 		/*
   8231 		 * PHY Found!
   8232 		 * Check PHY type.
   8233 		 */
   8234 		uint32_t model;
   8235 		struct mii_softc *child;
   8236 
   8237 		child = LIST_FIRST(&mii->mii_phys);
   8238 		model = child->mii_mpd_model;
   8239 		if (model == MII_MODEL_yyINTEL_I82566)
   8240 			sc->sc_phytype = WMPHY_IGP_3;
   8241 
   8242 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8243 	}
   8244 }
   8245 
   8246 /*
   8247  * wm_gmii_mediachange:	[ifmedia interface function]
   8248  *
   8249  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8250  */
   8251 static int
   8252 wm_gmii_mediachange(struct ifnet *ifp)
   8253 {
   8254 	struct wm_softc *sc = ifp->if_softc;
   8255 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8256 	int rc;
   8257 
   8258 	if ((ifp->if_flags & IFF_UP) == 0)
   8259 		return 0;
   8260 
   8261 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8262 	sc->sc_ctrl |= CTRL_SLU;
   8263 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8264 	    || (sc->sc_type > WM_T_82543)) {
   8265 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8266 	} else {
   8267 		sc->sc_ctrl &= ~CTRL_ASDE;
   8268 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8269 		if (ife->ifm_media & IFM_FDX)
   8270 			sc->sc_ctrl |= CTRL_FD;
   8271 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8272 		case IFM_10_T:
   8273 			sc->sc_ctrl |= CTRL_SPEED_10;
   8274 			break;
   8275 		case IFM_100_TX:
   8276 			sc->sc_ctrl |= CTRL_SPEED_100;
   8277 			break;
   8278 		case IFM_1000_T:
   8279 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8280 			break;
   8281 		default:
   8282 			panic("wm_gmii_mediachange: bad media 0x%x",
   8283 			    ife->ifm_media);
   8284 		}
   8285 	}
   8286 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8287 	if (sc->sc_type <= WM_T_82543)
   8288 		wm_gmii_reset(sc);
   8289 
   8290 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8291 		return 0;
   8292 	return rc;
   8293 }
   8294 
   8295 /*
   8296  * wm_gmii_mediastatus:	[ifmedia interface function]
   8297  *
   8298  *	Get the current interface media status on a 1000BASE-T device.
   8299  */
   8300 static void
   8301 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8302 {
   8303 	struct wm_softc *sc = ifp->if_softc;
   8304 
   8305 	ether_mediastatus(ifp, ifmr);
   8306 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8307 	    | sc->sc_flowflags;
   8308 }
   8309 
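         /*
          * On the i82543 the MII management interface is bit-banged
          * through software-definable pins: SWDPIN 2 carries the MDIO
          * data and SWDPIN 3 the MDC clock.
          */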
   8310 #define	MDI_IO		CTRL_SWDPIN(2)
   8311 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8312 #define	MDI_CLK		CTRL_SWDPIN(3)
   8313 
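         /*
          * wm_i82543_mii_sendbits:
          *
          *	Clock a value out to the PHY over the bit-banged MDIO
          *	interface, most significant bit first.
          */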
   8314 static void
   8315 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8316 {
   8317 	uint32_t i, v;
   8318 
   8319 	v = CSR_READ(sc, WMREG_CTRL);
   8320 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8321 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8322 
   8323 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8324 		if (data & i)
   8325 			v |= MDI_IO;
   8326 		else
   8327 			v &= ~MDI_IO;
   8328 		CSR_WRITE(sc, WMREG_CTRL, v);
   8329 		CSR_WRITE_FLUSH(sc);
   8330 		delay(10);
   8331 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8332 		CSR_WRITE_FLUSH(sc);
   8333 		delay(10);
   8334 		CSR_WRITE(sc, WMREG_CTRL, v);
   8335 		CSR_WRITE_FLUSH(sc);
   8336 		delay(10);
   8337 	}
   8338 }
   8339 
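         /*
          * wm_i82543_mii_recvbits:
          *
          *	Clock a 16-bit value in from the PHY over the bit-banged
          *	MDIO interface.
          */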
   8340 static uint32_t
   8341 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8342 {
   8343 	uint32_t v, i, data = 0;
   8344 
   8345 	v = CSR_READ(sc, WMREG_CTRL);
   8346 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8347 	v |= CTRL_SWDPIO(3);
   8348 
   8349 	CSR_WRITE(sc, WMREG_CTRL, v);
   8350 	CSR_WRITE_FLUSH(sc);
   8351 	delay(10);
   8352 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8353 	CSR_WRITE_FLUSH(sc);
   8354 	delay(10);
   8355 	CSR_WRITE(sc, WMREG_CTRL, v);
   8356 	CSR_WRITE_FLUSH(sc);
   8357 	delay(10);
   8358 
   8359 	for (i = 0; i < 16; i++) {
   8360 		data <<= 1;
   8361 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8362 		CSR_WRITE_FLUSH(sc);
   8363 		delay(10);
   8364 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8365 			data |= 1;
   8366 		CSR_WRITE(sc, WMREG_CTRL, v);
   8367 		CSR_WRITE_FLUSH(sc);
   8368 		delay(10);
   8369 	}
   8370 
   8371 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8372 	CSR_WRITE_FLUSH(sc);
   8373 	delay(10);
   8374 	CSR_WRITE(sc, WMREG_CTRL, v);
   8375 	CSR_WRITE_FLUSH(sc);
   8376 	delay(10);
   8377 
   8378 	return data;
   8379 }
   8380 
   8381 #undef MDI_IO
   8382 #undef MDI_DIR
   8383 #undef MDI_CLK
   8384 
   8385 /*
   8386  * wm_gmii_i82543_readreg:	[mii interface function]
   8387  *
   8388  *	Read a PHY register on the GMII (i82543 version).
   8389  */
   8390 static int
   8391 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8392 {
   8393 	struct wm_softc *sc = device_private(self);
   8394 	int rv;
   8395 
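         	/* 32 bits of preamble, then the 14-bit read command frame */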
   8396 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8397 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8398 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8399 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8400 
   8401 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8402 	    device_xname(sc->sc_dev), phy, reg, rv));
   8403 
   8404 	return rv;
   8405 }
   8406 
   8407 /*
   8408  * wm_gmii_i82543_writereg:	[mii interface function]
   8409  *
   8410  *	Write a PHY register on the GMII (i82543 version).
   8411  */
   8412 static void
   8413 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8414 {
   8415 	struct wm_softc *sc = device_private(self);
   8416 
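         	/* 32 bits of preamble, then the 32-bit write command frame */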
   8417 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8418 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8419 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8420 	    (MII_COMMAND_START << 30), 32);
   8421 }
   8422 
   8423 /*
   8424  * wm_gmii_i82544_readreg:	[mii interface function]
   8425  *
   8426  *	Read a PHY register on the GMII.
   8427  */
   8428 static int
   8429 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8430 {
   8431 	struct wm_softc *sc = device_private(self);
   8432 	uint32_t mdic = 0;
   8433 	int i, rv;
   8434 
   8435 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8436 	    MDIC_REGADD(reg));
   8437 
   8438 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8439 		mdic = CSR_READ(sc, WMREG_MDIC);
   8440 		if (mdic & MDIC_READY)
   8441 			break;
   8442 		delay(50);
   8443 	}
   8444 
   8445 	if ((mdic & MDIC_READY) == 0) {
   8446 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8447 		    device_xname(sc->sc_dev), phy, reg);
   8448 		rv = 0;
   8449 	} else if (mdic & MDIC_E) {
   8450 #if 0 /* This is normal if no PHY is present. */
   8451 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8452 		    device_xname(sc->sc_dev), phy, reg);
   8453 #endif
   8454 		rv = 0;
   8455 	} else {
   8456 		rv = MDIC_DATA(mdic);
   8457 		if (rv == 0xffff)
   8458 			rv = 0;
   8459 	}
   8460 
   8461 	return rv;
   8462 }
   8463 
   8464 /*
   8465  * wm_gmii_i82544_writereg:	[mii interface function]
   8466  *
   8467  *	Write a PHY register on the GMII.
   8468  */
   8469 static void
   8470 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8471 {
   8472 	struct wm_softc *sc = device_private(self);
   8473 	uint32_t mdic = 0;
   8474 	int i;
   8475 
   8476 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8477 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8478 
   8479 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8480 		mdic = CSR_READ(sc, WMREG_MDIC);
   8481 		if (mdic & MDIC_READY)
   8482 			break;
   8483 		delay(50);
   8484 	}
   8485 
   8486 	if ((mdic & MDIC_READY) == 0)
   8487 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8488 		    device_xname(sc->sc_dev), phy, reg);
   8489 	else if (mdic & MDIC_E)
   8490 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8491 		    device_xname(sc->sc_dev), phy, reg);
   8492 }
   8493 
   8494 /*
   8495  * wm_gmii_i80003_readreg:	[mii interface function]
   8496  *
    8497  *	Read a PHY register on the Kumeran interface (80003).
    8498  * This could be handled by the PHY layer if we didn't have to lock the
    8499  * resource ...
   8500  */
   8501 static int
   8502 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8503 {
   8504 	struct wm_softc *sc = device_private(self);
   8505 	int sem;
   8506 	int rv;
   8507 
   8508 	if (phy != 1) /* only one PHY on kumeran bus */
   8509 		return 0;
   8510 
   8511 	sem = swfwphysem[sc->sc_funcid];
   8512 	if (wm_get_swfw_semaphore(sc, sem)) {
   8513 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8514 		    __func__);
   8515 		return 0;
   8516 	}
   8517 
   8518 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8519 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8520 		    reg >> GG82563_PAGE_SHIFT);
   8521 	} else {
   8522 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8523 		    reg >> GG82563_PAGE_SHIFT);
   8524 	}
    8525 	/* Wait 200us more to work around a bug with the MDIC ready bit */
   8526 	delay(200);
   8527 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8528 	delay(200);
   8529 
   8530 	wm_put_swfw_semaphore(sc, sem);
   8531 	return rv;
   8532 }
   8533 
   8534 /*
   8535  * wm_gmii_i80003_writereg:	[mii interface function]
   8536  *
    8537  *	Write a PHY register on the Kumeran interface (80003).
    8538  * This could be handled by the PHY layer if we didn't have to lock the
    8539  * resource ...
   8540  */
   8541 static void
   8542 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8543 {
   8544 	struct wm_softc *sc = device_private(self);
   8545 	int sem;
   8546 
   8547 	if (phy != 1) /* only one PHY on kumeran bus */
   8548 		return;
   8549 
   8550 	sem = swfwphysem[sc->sc_funcid];
   8551 	if (wm_get_swfw_semaphore(sc, sem)) {
   8552 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8553 		    __func__);
   8554 		return;
   8555 	}
   8556 
   8557 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8558 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8559 		    reg >> GG82563_PAGE_SHIFT);
   8560 	} else {
   8561 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8562 		    reg >> GG82563_PAGE_SHIFT);
   8563 	}
    8564 	/* Wait 200us more to work around a bug with the MDIC ready bit */
   8565 	delay(200);
   8566 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8567 	delay(200);
   8568 
   8569 	wm_put_swfw_semaphore(sc, sem);
   8570 }
   8571 
   8572 /*
   8573  * wm_gmii_bm_readreg:	[mii interface function]
   8574  *
    8575  *	Read a PHY register on the BM PHY (82567).
    8576  * This could be handled by the PHY layer if we didn't have to lock the
    8577  * resource ...
   8578  */
   8579 static int
   8580 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8581 {
   8582 	struct wm_softc *sc = device_private(self);
   8583 	int sem;
   8584 	int rv;
   8585 
   8586 	sem = swfwphysem[sc->sc_funcid];
   8587 	if (wm_get_swfw_semaphore(sc, sem)) {
   8588 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8589 		    __func__);
   8590 		return 0;
   8591 	}
   8592 
   8593 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8594 		if (phy == 1)
   8595 			wm_gmii_i82544_writereg(self, phy,
   8596 			    MII_IGPHY_PAGE_SELECT, reg);
   8597 		else
   8598 			wm_gmii_i82544_writereg(self, phy,
   8599 			    GG82563_PHY_PAGE_SELECT,
   8600 			    reg >> GG82563_PAGE_SHIFT);
   8601 	}
   8602 
   8603 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8604 	wm_put_swfw_semaphore(sc, sem);
   8605 	return rv;
   8606 }
   8607 
   8608 /*
   8609  * wm_gmii_bm_writereg:	[mii interface function]
   8610  *
    8611  *	Write a PHY register on the BM PHY (82567).
    8612  * This could be handled by the PHY layer if we didn't have to lock the
    8613  * resource ...
   8614  */
   8615 static void
   8616 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8617 {
   8618 	struct wm_softc *sc = device_private(self);
   8619 	int sem;
   8620 
   8621 	sem = swfwphysem[sc->sc_funcid];
   8622 	if (wm_get_swfw_semaphore(sc, sem)) {
   8623 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8624 		    __func__);
   8625 		return;
   8626 	}
   8627 
   8628 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8629 		if (phy == 1)
   8630 			wm_gmii_i82544_writereg(self, phy,
   8631 			    MII_IGPHY_PAGE_SELECT, reg);
   8632 		else
   8633 			wm_gmii_i82544_writereg(self, phy,
   8634 			    GG82563_PHY_PAGE_SELECT,
   8635 			    reg >> GG82563_PAGE_SHIFT);
   8636 	}
   8637 
   8638 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8639 	wm_put_swfw_semaphore(sc, sem);
   8640 }
   8641 
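         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
          *	The wakeup registers live on page 800 and are reached
          *	indirectly: enable access from page 769, select page 800,
          *	issue the address and data opcodes, then restore the
          *	original wakeup-control enable bits.
          */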
   8642 static void
   8643 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8644 {
   8645 	struct wm_softc *sc = device_private(self);
   8646 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8647 	uint16_t wuce;
   8648 
   8649 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8650 	if (sc->sc_type == WM_T_PCH) {
    8651 		/* XXX The e1000 driver does nothing here... why? */
   8652 	}
   8653 
   8654 	/* Set page 769 */
   8655 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8656 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8657 
   8658 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8659 
   8660 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8661 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8662 	    wuce | BM_WUC_ENABLE_BIT);
   8663 
   8664 	/* Select page 800 */
   8665 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8666 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8667 
   8668 	/* Write page 800 */
   8669 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8670 
   8671 	if (rd)
   8672 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8673 	else
   8674 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8675 
   8676 	/* Set page 769 */
   8677 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8678 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8679 
   8680 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8681 }
   8682 
   8683 /*
   8684  * wm_gmii_hv_readreg:	[mii interface function]
   8685  *
    8686  *	Read a PHY register on the HV (PCH and newer) PHY.
    8687  * This could be handled by the PHY layer if we didn't have to lock the
    8688  * resource ...
   8689  */
   8690 static int
   8691 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8692 {
   8693 	struct wm_softc *sc = device_private(self);
   8694 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8695 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8696 	uint16_t val;
   8697 	int rv;
   8698 
   8699 	if (wm_get_swfwhw_semaphore(sc)) {
   8700 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8701 		    __func__);
   8702 		return 0;
   8703 	}
   8704 
   8705 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8706 	if (sc->sc_phytype == WMPHY_82577) {
   8707 		/* XXX must write */
   8708 	}
   8709 
    8710 	/* Page 800 works differently from the rest, so it has its own func */
    8711 	if (page == BM_WUC_PAGE) {
    8712 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    8713 		return val;
    8714 	}
   8715 
    8716 	/*
    8717 	 * Pages lower than 768 work differently from the rest and are
    8718 	 * not handled yet; they would need their own function.
    8719 	 */
    8720 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8721 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8722 		return 0;
    8723 	}
   8724 
   8725 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8726 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8727 		    page << BME1000_PAGE_SHIFT);
   8728 	}
   8729 
   8730 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8731 	wm_put_swfwhw_semaphore(sc);
   8732 	return rv;
   8733 }
   8734 
   8735 /*
   8736  * wm_gmii_hv_writereg:	[mii interface function]
   8737  *
    8738  *	Write a PHY register on the HV (PCH and newer) PHY.
    8739  * This could be handled by the PHY layer if we didn't have to lock the
    8740  * resource ...
   8741  */
   8742 static void
   8743 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8744 {
   8745 	struct wm_softc *sc = device_private(self);
   8746 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8747 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8748 
   8749 	if (wm_get_swfwhw_semaphore(sc)) {
   8750 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8751 		    __func__);
   8752 		return;
   8753 	}
   8754 
   8755 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8756 
    8757 	/* Page 800 works differently from the rest, so it has its own func */
    8758 	if (page == BM_WUC_PAGE) {
    8759 		uint16_t tmp;
    8760 
    8761 		tmp = val;
    8762 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    8763 		return;
    8764 	}
   8765 
    8766 	/*
    8767 	 * Pages lower than 768 work differently from the rest and are
    8768 	 * not handled yet; they would need their own function.
    8769 	 */
    8770 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8771 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8772 		return;
    8773 	}
   8774 
   8775 	/*
   8776 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8777 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8778 	 */
   8779 
   8780 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8781 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8782 		    page << BME1000_PAGE_SHIFT);
   8783 	}
   8784 
   8785 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8786 	wm_put_swfwhw_semaphore(sc);
   8787 }
   8788 
   8789 /*
   8790  * wm_gmii_82580_readreg:	[mii interface function]
   8791  *
    8792  *	Read a PHY register on the 82580, I350 and I354.
    8793  * This could be handled by the PHY layer if we didn't have to lock the
    8794  * resource ...
   8795  */
   8796 static int
   8797 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8798 {
   8799 	struct wm_softc *sc = device_private(self);
   8800 	int sem;
   8801 	int rv;
   8802 
   8803 	sem = swfwphysem[sc->sc_funcid];
   8804 	if (wm_get_swfw_semaphore(sc, sem)) {
   8805 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8806 		    __func__);
   8807 		return 0;
   8808 	}
   8809 
   8810 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8811 
   8812 	wm_put_swfw_semaphore(sc, sem);
   8813 	return rv;
   8814 }
   8815 
   8816 /*
   8817  * wm_gmii_82580_writereg:	[mii interface function]
   8818  *
    8819  *	Write a PHY register on the 82580, I350 and I354.
    8820  * This could be handled by the PHY layer if we didn't have to lock the
    8821  * resource ...
   8822  */
   8823 static void
   8824 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8825 {
   8826 	struct wm_softc *sc = device_private(self);
   8827 	int sem;
   8828 
   8829 	sem = swfwphysem[sc->sc_funcid];
   8830 	if (wm_get_swfw_semaphore(sc, sem)) {
   8831 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8832 		    __func__);
   8833 		return;
   8834 	}
   8835 
   8836 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8837 
   8838 	wm_put_swfw_semaphore(sc, sem);
   8839 }
   8840 
   8841 /*
   8842  * wm_gmii_gs40g_readreg:	[mii interface function]
   8843  *
    8844  *	Read a PHY register on the I210 and I211.
    8845  * This could be handled by the PHY layer if we didn't have to lock the
    8846  * resource ...
   8847  */
   8848 static int
   8849 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8850 {
   8851 	struct wm_softc *sc = device_private(self);
   8852 	int sem;
   8853 	int page, offset;
   8854 	int rv;
   8855 
   8856 	/* Acquire semaphore */
   8857 	sem = swfwphysem[sc->sc_funcid];
   8858 	if (wm_get_swfw_semaphore(sc, sem)) {
   8859 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8860 		    __func__);
   8861 		return 0;
   8862 	}
   8863 
   8864 	/* Page select */
   8865 	page = reg >> GS40G_PAGE_SHIFT;
   8866 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8867 
   8868 	/* Read reg */
   8869 	offset = reg & GS40G_OFFSET_MASK;
   8870 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8871 
   8872 	wm_put_swfw_semaphore(sc, sem);
   8873 	return rv;
   8874 }
   8875 
   8876 /*
   8877  * wm_gmii_gs40g_writereg:	[mii interface function]
   8878  *
   8879  *	Write a PHY register on the I210 and I211.
   8880  * This could be handled by the PHY layer if we didn't have to lock the
    8881  * resource ...
   8882  */
   8883 static void
   8884 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8885 {
   8886 	struct wm_softc *sc = device_private(self);
   8887 	int sem;
   8888 	int page, offset;
   8889 
   8890 	/* Acquire semaphore */
   8891 	sem = swfwphysem[sc->sc_funcid];
   8892 	if (wm_get_swfw_semaphore(sc, sem)) {
   8893 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8894 		    __func__);
   8895 		return;
   8896 	}
   8897 
   8898 	/* Page select */
   8899 	page = reg >> GS40G_PAGE_SHIFT;
   8900 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8901 
   8902 	/* Write reg */
   8903 	offset = reg & GS40G_OFFSET_MASK;
   8904 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8905 
   8906 	/* Release semaphore */
   8907 	wm_put_swfw_semaphore(sc, sem);
   8908 }
   8909 
   8910 /*
   8911  * wm_gmii_statchg:	[mii interface function]
   8912  *
   8913  *	Callback from MII layer when media changes.
   8914  */
   8915 static void
   8916 wm_gmii_statchg(struct ifnet *ifp)
   8917 {
   8918 	struct wm_softc *sc = ifp->if_softc;
   8919 	struct mii_data *mii = &sc->sc_mii;
   8920 
   8921 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8922 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8923 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8924 
   8925 	/*
   8926 	 * Get flow control negotiation result.
   8927 	 */
   8928 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8929 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8930 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8931 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8932 	}
   8933 
   8934 	if (sc->sc_flowflags & IFM_FLOW) {
   8935 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8936 			sc->sc_ctrl |= CTRL_TFCE;
   8937 			sc->sc_fcrtl |= FCRTL_XONE;
   8938 		}
   8939 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8940 			sc->sc_ctrl |= CTRL_RFCE;
   8941 	}
   8942 
   8943 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8944 		DPRINTF(WM_DEBUG_LINK,
   8945 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8946 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8947 	} else {
   8948 		DPRINTF(WM_DEBUG_LINK,
   8949 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8950 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8951 	}
   8952 
   8953 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8954 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8955 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8956 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8957 	if (sc->sc_type == WM_T_80003) {
   8958 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8959 		case IFM_1000_T:
   8960 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8961 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8962 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8963 			break;
   8964 		default:
   8965 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8966 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8967 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8968 			break;
   8969 		}
   8970 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8971 	}
   8972 }
   8973 
   8974 /*
   8975  * wm_kmrn_readreg:
   8976  *
   8977  *	Read a kumeran register
   8978  */
   8979 static int
   8980 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8981 {
   8982 	int rv;
   8983 
   8984 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8985 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8986 			aprint_error_dev(sc->sc_dev,
   8987 			    "%s: failed to get semaphore\n", __func__);
   8988 			return 0;
   8989 		}
   8990 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8991 		if (wm_get_swfwhw_semaphore(sc)) {
   8992 			aprint_error_dev(sc->sc_dev,
   8993 			    "%s: failed to get semaphore\n", __func__);
   8994 			return 0;
   8995 		}
   8996 	}
   8997 
   8998 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8999 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9000 	    KUMCTRLSTA_REN);
   9001 	CSR_WRITE_FLUSH(sc);
   9002 	delay(2);
   9003 
   9004 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9005 
   9006 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9007 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9008 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9009 		wm_put_swfwhw_semaphore(sc);
   9010 
   9011 	return rv;
   9012 }
   9013 
   9014 /*
   9015  * wm_kmrn_writereg:
   9016  *
   9017  *	Write a kumeran register
   9018  */
   9019 static void
   9020 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9021 {
   9022 
   9023 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   9024 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   9025 			aprint_error_dev(sc->sc_dev,
   9026 			    "%s: failed to get semaphore\n", __func__);
   9027 			return;
   9028 		}
   9029 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   9030 		if (wm_get_swfwhw_semaphore(sc)) {
   9031 			aprint_error_dev(sc->sc_dev,
   9032 			    "%s: failed to get semaphore\n", __func__);
   9033 			return;
   9034 		}
   9035 	}
   9036 
   9037 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9038 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9039 	    (val & KUMCTRLSTA_MASK));
   9040 
   9041 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   9042 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9043 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   9044 		wm_put_swfwhw_semaphore(sc);
   9045 }
   9046 
   9047 /* SGMII related */
   9048 
   9049 /*
    9050  * wm_sgmii_uses_mdio:
   9051  *
   9052  * Check whether the transaction is to the internal PHY or the external
   9053  * MDIO interface. Return true if it's MDIO.
   9054  */
   9055 static bool
   9056 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9057 {
   9058 	uint32_t reg;
   9059 	bool ismdio = false;
   9060 
   9061 	switch (sc->sc_type) {
   9062 	case WM_T_82575:
   9063 	case WM_T_82576:
   9064 		reg = CSR_READ(sc, WMREG_MDIC);
   9065 		ismdio = ((reg & MDIC_DEST) != 0);
   9066 		break;
   9067 	case WM_T_82580:
   9068 	case WM_T_I350:
   9069 	case WM_T_I354:
   9070 	case WM_T_I210:
   9071 	case WM_T_I211:
   9072 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9073 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9074 		break;
   9075 	default:
   9076 		break;
   9077 	}
   9078 
   9079 	return ismdio;
   9080 }
   9081 
   9082 /*
   9083  * wm_sgmii_readreg:	[mii interface function]
   9084  *
   9085  *	Read a PHY register on the SGMII
   9086  * This could be handled by the PHY layer if we didn't have to lock the
    9087  * resource ...
   9088  */
   9089 static int
   9090 wm_sgmii_readreg(device_t self, int phy, int reg)
   9091 {
   9092 	struct wm_softc *sc = device_private(self);
   9093 	uint32_t i2ccmd;
   9094 	int i, rv;
   9095 
   9096 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9097 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9098 		    __func__);
   9099 		return 0;
   9100 	}
   9101 
   9102 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9103 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9104 	    | I2CCMD_OPCODE_READ;
   9105 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9106 
   9107 	/* Poll the ready bit */
   9108 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9109 		delay(50);
   9110 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9111 		if (i2ccmd & I2CCMD_READY)
   9112 			break;
   9113 	}
   9114 	if ((i2ccmd & I2CCMD_READY) == 0)
   9115 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9116 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9117 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9118 
   9119 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9120 
   9121 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9122 	return rv;
   9123 }
   9124 
   9125 /*
   9126  * wm_sgmii_writereg:	[mii interface function]
   9127  *
   9128  *	Write a PHY register on the SGMII.
   9129  * This could be handled by the PHY layer if we didn't have to lock the
    9130  * resource ...
   9131  */
   9132 static void
   9133 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9134 {
   9135 	struct wm_softc *sc = device_private(self);
   9136 	uint32_t i2ccmd;
   9137 	int i;
   9138 	int val_swapped;
   9139 
   9140 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   9141 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9142 		    __func__);
   9143 		return;
   9144 	}
   9145 	/* Swap the data bytes for the I2C interface */
   9146 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9147 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9148 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9149 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9150 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9151 
   9152 	/* Poll the ready bit */
   9153 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9154 		delay(50);
   9155 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9156 		if (i2ccmd & I2CCMD_READY)
   9157 			break;
   9158 	}
   9159 	if ((i2ccmd & I2CCMD_READY) == 0)
   9160 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9161 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9162 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9163 
    9164 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9165 }
   9166 
   9167 /* TBI related */
   9168 
   9169 /*
   9170  * wm_tbi_mediainit:
   9171  *
   9172  *	Initialize media for use on 1000BASE-X devices.
   9173  */
   9174 static void
   9175 wm_tbi_mediainit(struct wm_softc *sc)
   9176 {
   9177 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9178 	const char *sep = "";
   9179 
   9180 	if (sc->sc_type < WM_T_82543)
   9181 		sc->sc_tipg = TIPG_WM_DFLT;
   9182 	else
   9183 		sc->sc_tipg = TIPG_LG_DFLT;
   9184 
   9185 	sc->sc_tbi_serdes_anegticks = 5;
   9186 
   9187 	/* Initialize our media structures */
   9188 	sc->sc_mii.mii_ifp = ifp;
   9189 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9190 
   9191 	if ((sc->sc_type >= WM_T_82575)
   9192 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9193 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9194 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9195 	else
   9196 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9197 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9198 
   9199 	/*
   9200 	 * SWD Pins:
   9201 	 *
   9202 	 *	0 = Link LED (output)
   9203 	 *	1 = Loss Of Signal (input)
   9204 	 */
   9205 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9206 
   9207 	/* XXX Perhaps this is only for TBI */
   9208 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9209 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9210 
   9211 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9212 		sc->sc_ctrl &= ~CTRL_LRST;
   9213 
   9214 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9215 
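         /* Add an ifmedia entry and print its name, comma-separated */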
   9216 #define	ADD(ss, mm, dd)							\
   9217 do {									\
   9218 	aprint_normal("%s%s", sep, ss);					\
   9219 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9220 	sep = ", ";							\
   9221 } while (/*CONSTCOND*/0)
   9222 
   9223 	aprint_normal_dev(sc->sc_dev, "");
   9224 
   9225 	/* Only 82545 is LX */
   9226 	if (sc->sc_type == WM_T_82545) {
   9227 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9228 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9229 	} else {
   9230 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9231 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9232 	}
   9233 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9234 	aprint_normal("\n");
   9235 
   9236 #undef ADD
   9237 
   9238 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9239 }
   9240 
   9241 /*
   9242  * wm_tbi_mediachange:	[ifmedia interface function]
   9243  *
   9244  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9245  */
   9246 static int
   9247 wm_tbi_mediachange(struct ifnet *ifp)
   9248 {
   9249 	struct wm_softc *sc = ifp->if_softc;
   9250 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9251 	uint32_t status;
   9252 	int i;
   9253 
   9254 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9255 		/* XXX need some work for >= 82571 and < 82575 */
   9256 		if (sc->sc_type < WM_T_82575)
   9257 			return 0;
   9258 	}
   9259 
   9260 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9261 	    || (sc->sc_type >= WM_T_82575))
   9262 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9263 
   9264 	sc->sc_ctrl &= ~CTRL_LRST;
   9265 	sc->sc_txcw = TXCW_ANE;
   9266 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9267 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9268 	else if (ife->ifm_media & IFM_FDX)
   9269 		sc->sc_txcw |= TXCW_FD;
   9270 	else
   9271 		sc->sc_txcw |= TXCW_HD;
   9272 
   9273 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9274 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9275 
   9276 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9277 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9278 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9279 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9280 	CSR_WRITE_FLUSH(sc);
   9281 	delay(1000);
   9282 
   9283 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9284 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9285 
   9286 	/*
    9287 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
    9288 	 * set if the optics detect a signal; on older chips, a 0 on the
         	 * pin indicates a signal.
   9289 	 */
   9290 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9291 		/* Have signal; wait for the link to come up. */
   9292 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9293 			delay(10000);
   9294 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9295 				break;
   9296 		}
   9297 
   9298 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9299 			    device_xname(sc->sc_dev),i));
   9300 
   9301 		status = CSR_READ(sc, WMREG_STATUS);
   9302 		DPRINTF(WM_DEBUG_LINK,
   9303 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9304 			device_xname(sc->sc_dev),status, STATUS_LU));
   9305 		if (status & STATUS_LU) {
   9306 			/* Link is up. */
   9307 			DPRINTF(WM_DEBUG_LINK,
   9308 			    ("%s: LINK: set media -> link up %s\n",
   9309 			    device_xname(sc->sc_dev),
   9310 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9311 
   9312 			/*
   9313 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9314 			 * so we should update sc->sc_ctrl
   9315 			 */
   9316 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9317 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9318 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9319 			if (status & STATUS_FD)
   9320 				sc->sc_tctl |=
   9321 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9322 			else
   9323 				sc->sc_tctl |=
   9324 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9325 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9326 				sc->sc_fcrtl |= FCRTL_XONE;
   9327 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9328 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9329 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9330 				      sc->sc_fcrtl);
   9331 			sc->sc_tbi_linkup = 1;
   9332 		} else {
   9333 			if (i == WM_LINKUP_TIMEOUT)
   9334 				wm_check_for_link(sc);
   9335 			/* Link is down. */
   9336 			DPRINTF(WM_DEBUG_LINK,
   9337 			    ("%s: LINK: set media -> link down\n",
   9338 			    device_xname(sc->sc_dev)));
   9339 			sc->sc_tbi_linkup = 0;
   9340 		}
   9341 	} else {
   9342 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9343 		    device_xname(sc->sc_dev)));
   9344 		sc->sc_tbi_linkup = 0;
   9345 	}
   9346 
   9347 	wm_tbi_serdes_set_linkled(sc);
   9348 
   9349 	return 0;
   9350 }
   9351 
   9352 /*
   9353  * wm_tbi_mediastatus:	[ifmedia interface function]
   9354  *
   9355  *	Get the current interface media status on a 1000BASE-X device.
   9356  */
   9357 static void
   9358 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9359 {
   9360 	struct wm_softc *sc = ifp->if_softc;
   9361 	uint32_t ctrl, status;
   9362 
   9363 	ifmr->ifm_status = IFM_AVALID;
   9364 	ifmr->ifm_active = IFM_ETHER;
   9365 
   9366 	status = CSR_READ(sc, WMREG_STATUS);
   9367 	if ((status & STATUS_LU) == 0) {
   9368 		ifmr->ifm_active |= IFM_NONE;
   9369 		return;
   9370 	}
   9371 
   9372 	ifmr->ifm_status |= IFM_ACTIVE;
   9373 	/* Only 82545 is LX */
   9374 	if (sc->sc_type == WM_T_82545)
   9375 		ifmr->ifm_active |= IFM_1000_LX;
   9376 	else
   9377 		ifmr->ifm_active |= IFM_1000_SX;
   9378 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9379 		ifmr->ifm_active |= IFM_FDX;
   9380 	else
   9381 		ifmr->ifm_active |= IFM_HDX;
   9382 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9383 	if (ctrl & CTRL_RFCE)
   9384 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9385 	if (ctrl & CTRL_TFCE)
   9386 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9387 }
   9388 
    9389 /*
          * wm_check_for_link:
          *
          *	Check for link on TBI devices; depending on the signal pin,
          *	link-up and /C/ ordered-set state, either force the link up
          *	or fall back to autonegotiation.  XXX TBI only.
          */
   9390 static int
   9391 wm_check_for_link(struct wm_softc *sc)
   9392 {
   9393 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9394 	uint32_t rxcw;
   9395 	uint32_t ctrl;
   9396 	uint32_t status;
   9397 	uint32_t sig;
   9398 
   9399 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9400 		/* XXX need some work for >= 82571 */
   9401 		if (sc->sc_type >= WM_T_82571) {
   9402 			sc->sc_tbi_linkup = 1;
   9403 			return 0;
   9404 		}
   9405 	}
   9406 
   9407 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9408 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9409 	status = CSR_READ(sc, WMREG_STATUS);
   9410 
   9411 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9412 
   9413 	DPRINTF(WM_DEBUG_LINK,
   9414 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9415 		device_xname(sc->sc_dev), __func__,
   9416 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9417 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9418 
   9419 	/*
   9420 	 * SWDPIN   LU RXCW
   9421 	 *      0    0    0
   9422 	 *      0    0    1	(should not happen)
   9423 	 *      0    1    0	(should not happen)
   9424 	 *      0    1    1	(should not happen)
   9425 	 *      1    0    0	Disable autonego and force linkup
   9426 	 *      1    0    1	got /C/ but not linkup yet
   9427 	 *      1    1    0	(linkup)
   9428 	 *      1    1    1	If IFM_AUTO, back to autonego
   9429 	 *
   9430 	 */
   9431 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9432 	    && ((status & STATUS_LU) == 0)
   9433 	    && ((rxcw & RXCW_C) == 0)) {
   9434 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9435 			__func__));
   9436 		sc->sc_tbi_linkup = 0;
   9437 		/* Disable auto-negotiation in the TXCW register */
   9438 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9439 
   9440 		/*
   9441 		 * Force link-up and also force full-duplex.
   9442 		 *
    9443 		 * NOTE: CTRL updates TFCE and RFCE automatically,
   9444 		 * so we should update sc->sc_ctrl
   9445 		 */
   9446 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9447 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9448 	} else if (((status & STATUS_LU) != 0)
   9449 	    && ((rxcw & RXCW_C) != 0)
   9450 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9451 		sc->sc_tbi_linkup = 1;
   9452 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9453 			__func__));
   9454 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9455 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9456 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9457 	    && ((rxcw & RXCW_C) != 0)) {
   9458 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9459 	} else {
   9460 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9461 			status));
   9462 	}
   9463 
   9464 	return 0;
   9465 }
   9466 
   9467 /*
   9468  * wm_tbi_tick:
   9469  *
   9470  *	Check the link on TBI devices.
   9471  *	This function acts as mii_tick().
   9472  */
   9473 static void
   9474 wm_tbi_tick(struct wm_softc *sc)
   9475 {
   9476 	struct mii_data *mii = &sc->sc_mii;
   9477 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9478 	uint32_t status;
   9479 
   9480 	KASSERT(WM_CORE_LOCKED(sc));
   9481 
   9482 	status = CSR_READ(sc, WMREG_STATUS);
   9483 
   9484 	/* XXX is this needed? */
   9485 	(void)CSR_READ(sc, WMREG_RXCW);
   9486 	(void)CSR_READ(sc, WMREG_CTRL);
   9487 
   9488 	/* set link status */
   9489 	if ((status & STATUS_LU) == 0) {
   9490 		DPRINTF(WM_DEBUG_LINK,
   9491 		    ("%s: LINK: checklink -> down\n",
   9492 			device_xname(sc->sc_dev)));
   9493 		sc->sc_tbi_linkup = 0;
   9494 	} else if (sc->sc_tbi_linkup == 0) {
   9495 		DPRINTF(WM_DEBUG_LINK,
   9496 		    ("%s: LINK: checklink -> up %s\n",
   9497 			device_xname(sc->sc_dev),
   9498 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9499 		sc->sc_tbi_linkup = 1;
   9500 		sc->sc_tbi_serdes_ticks = 0;
   9501 	}
   9502 
   9503 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9504 		goto setled;
   9505 
   9506 	if ((status & STATUS_LU) == 0) {
   9507 		sc->sc_tbi_linkup = 0;
   9508 		/* If the timer expired, retry autonegotiation */
   9509 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9510 		    && (++sc->sc_tbi_serdes_ticks
   9511 			>= sc->sc_tbi_serdes_anegticks)) {
   9512 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9513 			sc->sc_tbi_serdes_ticks = 0;
   9514 			/*
   9515 			 * Reset the link, and let autonegotiation do
   9516 			 * its thing
   9517 			 */
   9518 			sc->sc_ctrl |= CTRL_LRST;
   9519 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9520 			CSR_WRITE_FLUSH(sc);
   9521 			delay(1000);
   9522 			sc->sc_ctrl &= ~CTRL_LRST;
   9523 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9524 			CSR_WRITE_FLUSH(sc);
   9525 			delay(1000);
   9526 			CSR_WRITE(sc, WMREG_TXCW,
   9527 			    sc->sc_txcw & ~TXCW_ANE);
   9528 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9529 		}
   9530 	}
   9531 
   9532 setled:
   9533 	wm_tbi_serdes_set_linkled(sc);
   9534 }
   9535 
   9536 /* SERDES related */
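         /*
          * wm_serdes_power_up_link_82575:
          *
          *	Power up the SERDES link by enabling the PCS and clearing
          *	SWDPIN 3 in CTRL_EXT (on boards where that pin gates the
          *	SFP cage power).
          */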
   9537 static void
   9538 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9539 {
   9540 	uint32_t reg;
   9541 
   9542 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9543 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9544 		return;
   9545 
   9546 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9547 	reg |= PCS_CFG_PCS_EN;
   9548 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9549 
   9550 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9551 	reg &= ~CTRL_EXT_SWDPIN(3);
   9552 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9553 	CSR_WRITE_FLUSH(sc);
   9554 }
   9555 
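         /*
          * wm_serdes_mediachange:	[ifmedia interface function]
          *
          *	Set hardware to newly-selected media on a SERDES device.
          */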
   9556 static int
   9557 wm_serdes_mediachange(struct ifnet *ifp)
   9558 {
   9559 	struct wm_softc *sc = ifp->if_softc;
   9560 	bool pcs_autoneg = true; /* XXX */
   9561 	uint32_t ctrl_ext, pcs_lctl, reg;
   9562 
   9563 	/* XXX Currently, this function is not called on 8257[12] */
   9564 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9565 	    || (sc->sc_type >= WM_T_82575))
   9566 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9567 
   9568 	wm_serdes_power_up_link_82575(sc);
   9569 
   9570 	sc->sc_ctrl |= CTRL_SLU;
   9571 
   9572 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9573 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9574 
   9575 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9576 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9577 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9578 	case CTRL_EXT_LINK_MODE_SGMII:
   9579 		pcs_autoneg = true;
   9580 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9581 		break;
   9582 	case CTRL_EXT_LINK_MODE_1000KX:
   9583 		pcs_autoneg = false;
   9584 		/* FALLTHROUGH */
   9585 	default:
   9586 		if ((sc->sc_type == WM_T_82575)
   9587 		    || (sc->sc_type == WM_T_82576)) {
   9588 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9589 				pcs_autoneg = false;
   9590 		}
   9591 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9592 		    | CTRL_FRCFDX;
   9593 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9594 	}
   9595 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9596 
   9597 	if (pcs_autoneg) {
   9598 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9599 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9600 
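         		/* Advertise symmetric and asymmetric PAUSE */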
   9601 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9602 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9603 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9604 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9605 	} else
   9606 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9607 
   9608 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9609 
   9611 	return 0;
   9612 }
   9613 
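         /*
          * wm_serdes_mediastatus:	[ifmedia interface function]
          *
          *	Get the current interface media status on a SERDES device.
          */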
   9614 static void
   9615 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9616 {
   9617 	struct wm_softc *sc = ifp->if_softc;
   9618 	struct mii_data *mii = &sc->sc_mii;
   9619 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9620 	uint32_t pcs_adv, pcs_lpab, reg;
   9621 
   9622 	ifmr->ifm_status = IFM_AVALID;
   9623 	ifmr->ifm_active = IFM_ETHER;
   9624 
   9625 	/* Check PCS */
   9626 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9627 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9628 		ifmr->ifm_active |= IFM_NONE;
   9629 		sc->sc_tbi_linkup = 0;
   9630 		goto setled;
   9631 	}
   9632 
   9633 	sc->sc_tbi_linkup = 1;
   9634 	ifmr->ifm_status |= IFM_ACTIVE;
   9635 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9636 	if ((reg & PCS_LSTS_FDX) != 0)
   9637 		ifmr->ifm_active |= IFM_FDX;
   9638 	else
   9639 		ifmr->ifm_active |= IFM_HDX;
   9640 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9641 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9642 		/* Check flow */
   9643 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9644 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9645 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9646 			goto setled;
   9647 		}
   9648 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9649 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9650 		DPRINTF(WM_DEBUG_LINK,
   9651 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9652 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9653 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9654 			mii->mii_media_active |= IFM_FLOW
   9655 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9656 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9657 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9658 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9659 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9660 			mii->mii_media_active |= IFM_FLOW
   9661 			    | IFM_ETH_TXPAUSE;
   9662 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9663 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9664 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9665 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9666 			mii->mii_media_active |= IFM_FLOW
   9667 			    | IFM_ETH_RXPAUSE;
   9668 		} else {
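         			/* Neither direction of PAUSE was negotiated */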
   9669 		}
   9670 	}
   9671 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9672 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9673 setled:
   9674 	wm_tbi_serdes_set_linkled(sc);
   9675 }
   9676 
   9677 /*
   9678  * wm_serdes_tick:
   9679  *
   9680  *	Check the link on serdes devices.
   9681  */
   9682 static void
   9683 wm_serdes_tick(struct wm_softc *sc)
   9684 {
   9685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9686 	struct mii_data *mii = &sc->sc_mii;
   9687 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9688 	uint32_t reg;
   9689 
   9690 	KASSERT(WM_CORE_LOCKED(sc));
   9691 
   9692 	mii->mii_media_status = IFM_AVALID;
   9693 	mii->mii_media_active = IFM_ETHER;
   9694 
   9695 	/* Check PCS */
   9696 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9697 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9698 		mii->mii_media_status |= IFM_ACTIVE;
   9699 		sc->sc_tbi_linkup = 1;
   9700 		sc->sc_tbi_serdes_ticks = 0;
   9701 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9702 		if ((reg & PCS_LSTS_FDX) != 0)
   9703 			mii->mii_media_active |= IFM_FDX;
   9704 		else
   9705 			mii->mii_media_active |= IFM_HDX;
   9706 	} else {
    9707 		mii->mii_media_active |= IFM_NONE;
    9708 		sc->sc_tbi_linkup = 0;
    9709 		/* If the timer expired, retry autonegotiation */
   9710 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9711 		    && (++sc->sc_tbi_serdes_ticks
   9712 			>= sc->sc_tbi_serdes_anegticks)) {
   9713 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9714 			sc->sc_tbi_serdes_ticks = 0;
   9715 			/* XXX */
   9716 			wm_serdes_mediachange(ifp);
   9717 		}
   9718 	}
   9719 
   9720 	wm_tbi_serdes_set_linkled(sc);
   9721 }
   9722 
   9723 /* SFP related */
   9724 
   9725 static int
   9726 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9727 {
   9728 	uint32_t i2ccmd;
   9729 	int i;
   9730 
   9731 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9732 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9733 
   9734 	/* Poll the ready bit */
   9735 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9736 		delay(50);
   9737 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9738 		if (i2ccmd & I2CCMD_READY)
   9739 			break;
   9740 	}
   9741 	if ((i2ccmd & I2CCMD_READY) == 0)
   9742 		return -1;
   9743 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9744 		return -1;
   9745 
   9746 	*data = i2ccmd & 0x00ff;
   9747 
   9748 	return 0;
   9749 }
   9750 
   9751 static uint32_t
   9752 wm_sfp_get_media_type(struct wm_softc *sc)
   9753 {
   9754 	uint32_t ctrl_ext;
   9755 	uint8_t val = 0;
   9756 	int timeout = 3;
   9757 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9758 	int rv = -1;
   9759 
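	/* Enable the I2C interface so we can read the SFP module's EEPROM. */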
   9760 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9761 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9762 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9763 	CSR_WRITE_FLUSH(sc);
   9764 
   9765 	/* Read SFP module data */
   9766 	while (timeout) {
   9767 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9768 		if (rv == 0)
   9769 			break;
   9770 		delay(100*1000); /* XXX too big */
   9771 		timeout--;
   9772 	}
   9773 	if (rv != 0)
   9774 		goto out;
   9775 	switch (val) {
   9776 	case SFF_SFP_ID_SFF:
   9777 		aprint_normal_dev(sc->sc_dev,
   9778 		    "Module/Connector soldered to board\n");
   9779 		break;
   9780 	case SFF_SFP_ID_SFP:
   9781 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9782 		break;
   9783 	case SFF_SFP_ID_UNKNOWN:
   9784 		goto out;
   9785 	default:
   9786 		break;
   9787 	}
   9788 
   9789 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9790 	if (rv != 0) {
   9791 		goto out;
   9792 	}
   9793 
   9794 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9795 		mediatype = WM_MEDIATYPE_SERDES;
    9796 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9797 		sc->sc_flags |= WM_F_SGMII;
   9798 		mediatype = WM_MEDIATYPE_COPPER;
    9799 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9800 		sc->sc_flags |= WM_F_SGMII;
   9801 		mediatype = WM_MEDIATYPE_SERDES;
   9802 	}
   9803 
   9804 out:
   9805 	/* Restore I2C interface setting */
   9806 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9807 
   9808 	return mediatype;
   9809 }
   9810 /*
   9811  * NVM related.
   9812  * Microwire, SPI (w/wo EERD) and Flash.
   9813  */
   9814 
   9815 /* Both spi and uwire */
   9816 
   9817 /*
   9818  * wm_eeprom_sendbits:
   9819  *
   9820  *	Send a series of bits to the EEPROM.
   9821  */
   9822 static void
   9823 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9824 {
   9825 	uint32_t reg;
   9826 	int x;
   9827 
   9828 	reg = CSR_READ(sc, WMREG_EECD);
   9829 
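	/* Shift the bits out MSB first: set DI, then pulse SK high and low. */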
   9830 	for (x = nbits; x > 0; x--) {
   9831 		if (bits & (1U << (x - 1)))
   9832 			reg |= EECD_DI;
   9833 		else
   9834 			reg &= ~EECD_DI;
   9835 		CSR_WRITE(sc, WMREG_EECD, reg);
   9836 		CSR_WRITE_FLUSH(sc);
   9837 		delay(2);
   9838 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9839 		CSR_WRITE_FLUSH(sc);
   9840 		delay(2);
   9841 		CSR_WRITE(sc, WMREG_EECD, reg);
   9842 		CSR_WRITE_FLUSH(sc);
   9843 		delay(2);
   9844 	}
   9845 }
   9846 
   9847 /*
   9848  * wm_eeprom_recvbits:
   9849  *
   9850  *	Receive a series of bits from the EEPROM.
   9851  */
   9852 static void
   9853 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9854 {
   9855 	uint32_t reg, val;
   9856 	int x;
   9857 
   9858 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9859 
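	/* Shift the bits in MSB first: raise SK, sample DO, then lower SK. */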
   9860 	val = 0;
   9861 	for (x = nbits; x > 0; x--) {
   9862 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9863 		CSR_WRITE_FLUSH(sc);
   9864 		delay(2);
   9865 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9866 			val |= (1U << (x - 1));
   9867 		CSR_WRITE(sc, WMREG_EECD, reg);
   9868 		CSR_WRITE_FLUSH(sc);
   9869 		delay(2);
   9870 	}
   9871 	*valp = val;
   9872 }
   9873 
   9874 /* Microwire */
   9875 
   9876 /*
   9877  * wm_nvm_read_uwire:
   9878  *
   9879  *	Read a word from the EEPROM using the MicroWire protocol.
   9880  */
   9881 static int
   9882 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9883 {
   9884 	uint32_t reg, val;
   9885 	int i;
   9886 
   9887 	for (i = 0; i < wordcnt; i++) {
   9888 		/* Clear SK and DI. */
   9889 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9890 		CSR_WRITE(sc, WMREG_EECD, reg);
   9891 
   9892 		/*
   9893 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9894 		 * and Xen.
   9895 		 *
   9896 		 * We use this workaround only for 82540 because qemu's
   9897 		 * e1000 act as 82540.
   9898 		 */
   9899 		if (sc->sc_type == WM_T_82540) {
   9900 			reg |= EECD_SK;
   9901 			CSR_WRITE(sc, WMREG_EECD, reg);
   9902 			reg &= ~EECD_SK;
   9903 			CSR_WRITE(sc, WMREG_EECD, reg);
   9904 			CSR_WRITE_FLUSH(sc);
   9905 			delay(2);
   9906 		}
   9907 		/* XXX: end of workaround */
   9908 
   9909 		/* Set CHIP SELECT. */
   9910 		reg |= EECD_CS;
   9911 		CSR_WRITE(sc, WMREG_EECD, reg);
   9912 		CSR_WRITE_FLUSH(sc);
   9913 		delay(2);
   9914 
   9915 		/* Shift in the READ command. */
   9916 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9917 
   9918 		/* Shift in address. */
   9919 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9920 
   9921 		/* Shift out the data. */
   9922 		wm_eeprom_recvbits(sc, &val, 16);
   9923 		data[i] = val & 0xffff;
   9924 
   9925 		/* Clear CHIP SELECT. */
   9926 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9927 		CSR_WRITE(sc, WMREG_EECD, reg);
   9928 		CSR_WRITE_FLUSH(sc);
   9929 		delay(2);
   9930 	}
   9931 
   9932 	return 0;
   9933 }
   9934 
   9935 /* SPI */
   9936 
   9937 /*
   9938  * Set SPI and FLASH related information from the EECD register.
   9939  * For 82541 and 82547, the word size is taken from EEPROM.
   9940  */
   9941 static int
   9942 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9943 {
   9944 	int size;
   9945 	uint32_t reg;
   9946 	uint16_t data;
   9947 
   9948 	reg = CSR_READ(sc, WMREG_EECD);
   9949 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9950 
   9951 	/* Read the size of NVM from EECD by default */
   9952 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9953 	switch (sc->sc_type) {
   9954 	case WM_T_82541:
   9955 	case WM_T_82541_2:
   9956 	case WM_T_82547:
   9957 	case WM_T_82547_2:
   9958 		/* Set dummy value to access EEPROM */
   9959 		sc->sc_nvm_wordsize = 64;
   9960 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9961 		reg = data;
   9962 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9963 		if (size == 0)
   9964 			size = 6; /* 64 word size */
   9965 		else
   9966 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9967 		break;
   9968 	case WM_T_80003:
   9969 	case WM_T_82571:
   9970 	case WM_T_82572:
   9971 	case WM_T_82573: /* SPI case */
   9972 	case WM_T_82574: /* SPI case */
   9973 	case WM_T_82583: /* SPI case */
   9974 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9975 		if (size > 14)
   9976 			size = 14;
   9977 		break;
   9978 	case WM_T_82575:
   9979 	case WM_T_82576:
   9980 	case WM_T_82580:
   9981 	case WM_T_I350:
   9982 	case WM_T_I354:
   9983 	case WM_T_I210:
   9984 	case WM_T_I211:
   9985 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9986 		if (size > 15)
   9987 			size = 15;
   9988 		break;
   9989 	default:
   9990 		aprint_error_dev(sc->sc_dev,
   9991 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9992 		return -1;
   9994 	}
   9995 
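	/* "size" is the log2 of the NVM size in 16-bit words. */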
   9996 	sc->sc_nvm_wordsize = 1 << size;
   9997 
   9998 	return 0;
   9999 }
   10000 
   10001 /*
   10002  * wm_nvm_ready_spi:
   10003  *
   10004  *	Wait for a SPI EEPROM to be ready for commands.
   10005  */
   10006 static int
   10007 wm_nvm_ready_spi(struct wm_softc *sc)
   10008 {
   10009 	uint32_t val;
   10010 	int usec;
   10011 
   10012 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10013 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10014 		wm_eeprom_recvbits(sc, &val, 8);
   10015 		if ((val & SPI_SR_RDY) == 0)
   10016 			break;
   10017 	}
   10018 	if (usec >= SPI_MAX_RETRIES) {
    10019 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10020 		return 1;
   10021 	}
   10022 	return 0;
   10023 }
   10024 
   10025 /*
   10026  * wm_nvm_read_spi:
   10027  *
    10028  *	Read a word from the EEPROM using the SPI protocol.
   10029  */
   10030 static int
   10031 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10032 {
   10033 	uint32_t reg, val;
   10034 	int i;
   10035 	uint8_t opc;
   10036 
   10037 	/* Clear SK and CS. */
   10038 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10039 	CSR_WRITE(sc, WMREG_EECD, reg);
   10040 	CSR_WRITE_FLUSH(sc);
   10041 	delay(2);
   10042 
   10043 	if (wm_nvm_ready_spi(sc))
   10044 		return 1;
   10045 
   10046 	/* Toggle CS to flush commands. */
   10047 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10048 	CSR_WRITE_FLUSH(sc);
   10049 	delay(2);
   10050 	CSR_WRITE(sc, WMREG_EECD, reg);
   10051 	CSR_WRITE_FLUSH(sc);
   10052 	delay(2);
   10053 
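	/*
	 * EEPROMs with 8 address bits encode the 9th address bit (A8)
	 * in the opcode.
	 */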
   10054 	opc = SPI_OPC_READ;
   10055 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10056 		opc |= SPI_OPC_A8;
   10057 
   10058 	wm_eeprom_sendbits(sc, opc, 8);
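	/* The address is a byte offset and words are two bytes wide. */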
   10059 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10060 
   10061 	for (i = 0; i < wordcnt; i++) {
   10062 		wm_eeprom_recvbits(sc, &val, 16);
   10063 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10064 	}
   10065 
   10066 	/* Raise CS and clear SK. */
   10067 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10068 	CSR_WRITE(sc, WMREG_EECD, reg);
   10069 	CSR_WRITE_FLUSH(sc);
   10070 	delay(2);
   10071 
   10072 	return 0;
   10073 }
   10074 
   10075 /* Using with EERD */
   10076 
   10077 static int
   10078 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10079 {
   10080 	uint32_t attempts = 100000;
   10081 	uint32_t i, reg = 0;
   10082 	int32_t done = -1;
   10083 
   10084 	for (i = 0; i < attempts; i++) {
   10085 		reg = CSR_READ(sc, rw);
   10086 
   10087 		if (reg & EERD_DONE) {
   10088 			done = 0;
   10089 			break;
   10090 		}
   10091 		delay(5);
   10092 	}
   10093 
   10094 	return done;
   10095 }
   10096 
   10097 static int
   10098 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10099     uint16_t *data)
   10100 {
   10101 	int i, eerd = 0;
   10102 	int error = 0;
   10103 
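	/* For each word: program the address, set START, then poll for DONE. */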
   10104 	for (i = 0; i < wordcnt; i++) {
   10105 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10106 
   10107 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10108 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10109 		if (error != 0)
   10110 			break;
   10111 
   10112 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10113 	}
   10114 
   10115 	return error;
   10116 }
   10117 
   10118 /* Flash */
   10119 
   10120 static int
   10121 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10122 {
   10123 	uint32_t eecd;
   10124 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10125 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10126 	uint8_t sig_byte = 0;
   10127 
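	/*
	 * The bank signature lives in the high byte of word ICH_NVM_SIG_WORD,
	 * hence the "* 2 + 1" byte offset above.
	 */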
   10128 	switch (sc->sc_type) {
   10129 	case WM_T_PCH_SPT:
   10130 		/*
   10131 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10132 		 * sector valid bits from the NVM.
   10133 		 */
   10134 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10135 		if ((*bank == 0) || (*bank == 1)) {
   10136 			aprint_error_dev(sc->sc_dev,
   10137 					 "%s: no valid NVM bank present\n",
   10138 				__func__);
   10139 			return -1;
   10140 		} else {
   10141 			*bank = *bank - 2;
   10142 			return 0;
   10143 		}
   10144 	case WM_T_ICH8:
   10145 	case WM_T_ICH9:
   10146 		eecd = CSR_READ(sc, WMREG_EECD);
   10147 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10148 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10149 			return 0;
   10150 		}
   10151 		/* FALLTHROUGH */
   10152 	default:
   10153 		/* Default to 0 */
   10154 		*bank = 0;
   10155 
   10156 		/* Check bank 0 */
   10157 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10158 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10159 			*bank = 0;
   10160 			return 0;
   10161 		}
   10162 
   10163 		/* Check bank 1 */
   10164 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10165 		    &sig_byte);
   10166 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10167 			*bank = 1;
   10168 			return 0;
   10169 		}
   10170 	}
   10171 
   10172 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10173 		device_xname(sc->sc_dev)));
   10174 	return -1;
   10175 }
   10176 
   10177 /******************************************************************************
   10178  * This function does initial flash setup so that a new read/write/erase cycle
   10179  * can be started.
   10180  *
   10181  * sc - The pointer to the hw structure
   10182  ****************************************************************************/
   10183 static int32_t
   10184 wm_ich8_cycle_init(struct wm_softc *sc)
   10185 {
   10186 	uint16_t hsfsts;
   10187 	int32_t error = 1;
   10188 	int32_t i     = 0;
   10189 
   10190 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10191 
    10192 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   10193 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10194 		return error;
   10195 	}
   10196 
    10197 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   10199 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10200 
   10201 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10202 
    10203 	/*
    10204 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10205 	 * check against in order to start a new cycle, or the FDONE bit
    10206 	 * should be changed in the hardware so that it reads 1 after a
    10207 	 * hardware reset, which could then indicate whether a cycle is in
    10208 	 * progress or has completed.  We should also have some software
    10209 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    10210 	 * so that access by two threads is serialized, or some way to keep
    10211 	 * two threads from starting a cycle at the same time.
    10212 	 */
   10213 
   10214 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10215 		/*
   10216 		 * There is no cycle running at present, so we can start a
   10217 		 * cycle
   10218 		 */
   10219 
   10220 		/* Begin by setting Flash Cycle Done. */
   10221 		hsfsts |= HSFSTS_DONE;
   10222 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10223 		error = 0;
   10224 	} else {
   10225 		/*
   10226 		 * otherwise poll for sometime so the current cycle has a
   10227 		 * chance to end before giving up.
   10228 		 */
   10229 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10230 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10231 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10232 				error = 0;
   10233 				break;
   10234 			}
   10235 			delay(1);
   10236 		}
   10237 		if (error == 0) {
   10238 			/*
   10239 			 * Successful in waiting for previous cycle to timeout,
   10240 			 * now set the Flash Cycle Done.
   10241 			 */
   10242 			hsfsts |= HSFSTS_DONE;
   10243 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10244 		}
   10245 	}
   10246 	return error;
   10247 }
   10248 
   10249 /******************************************************************************
   10250  * This function starts a flash cycle and waits for its completion
   10251  *
   10252  * sc - The pointer to the hw structure
   10253  ****************************************************************************/
   10254 static int32_t
   10255 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10256 {
   10257 	uint16_t hsflctl;
   10258 	uint16_t hsfsts;
   10259 	int32_t error = 1;
   10260 	uint32_t i = 0;
   10261 
   10262 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10263 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10264 	hsflctl |= HSFCTL_GO;
   10265 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10266 
   10267 	/* Wait till FDONE bit is set to 1 */
   10268 	do {
   10269 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10270 		if (hsfsts & HSFSTS_DONE)
   10271 			break;
   10272 		delay(1);
   10273 		i++;
   10274 	} while (i < timeout);
    10275 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   10276 		error = 0;
   10277 
   10278 	return error;
   10279 }
   10280 
   10281 /******************************************************************************
   10282  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10283  *
   10284  * sc - The pointer to the hw structure
   10285  * index - The index of the byte or word to read.
   10286  * size - Size of data to read, 1=byte 2=word, 4=dword
   10287  * data - Pointer to the word to store the value read.
   10288  *****************************************************************************/
   10289 static int32_t
   10290 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10291     uint32_t size, uint32_t *data)
   10292 {
   10293 	uint16_t hsfsts;
   10294 	uint16_t hsflctl;
   10295 	uint32_t flash_linear_address;
   10296 	uint32_t flash_data = 0;
   10297 	int32_t error = 1;
   10298 	int32_t count = 0;
   10299 
    10300 	if (size < 1 || size > 4 || data == NULL ||
   10301 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10302 		return error;
   10303 
   10304 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10305 	    sc->sc_ich8_flash_base;
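	/* Translate the byte index into a linear address in our flash region. */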
   10306 
   10307 	do {
   10308 		delay(1);
   10309 		/* Steps */
   10310 		error = wm_ich8_cycle_init(sc);
   10311 		if (error)
   10312 			break;
   10313 
   10314 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10315 		/* The byte count field holds size - 1: 0 = 1 byte, 3 = 4 bytes. */
   10316 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10317 		    & HSFCTL_BCOUNT_MASK;
   10318 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10319 		if (sc->sc_type == WM_T_PCH_SPT) {
   10320 			/*
   10321 			 * In SPT, This register is in Lan memory space, not
   10322 			 * flash. Therefore, only 32 bit access is supported.
   10323 			 */
   10324 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10325 			    (uint32_t)hsflctl);
   10326 		} else
   10327 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10328 
   10329 		/*
   10330 		 * Write the last 24 bits of index into Flash Linear address
   10331 		 * field in Flash Address
   10332 		 */
   10333 		/* TODO: TBD maybe check the index against the size of flash */
   10334 
   10335 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10336 
   10337 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10338 
    10339 		/*
    10340 		 * If FCERR is set, clear it and retry the whole sequence
    10341 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise
    10342 		 * read the Flash Data0 register, least significant byte
    10343 		 * first.
    10344 		 */
   10345 		if (error == 0) {
   10346 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10347 			if (size == 1)
   10348 				*data = (uint8_t)(flash_data & 0x000000FF);
   10349 			else if (size == 2)
   10350 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10351 			else if (size == 4)
   10352 				*data = (uint32_t)flash_data;
   10353 			break;
   10354 		} else {
   10355 			/*
   10356 			 * If we've gotten here, then things are probably
   10357 			 * completely hosed, but if the error condition is
   10358 			 * detected, it won't hurt to give it another try...
   10359 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10360 			 */
   10361 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10362 			if (hsfsts & HSFSTS_ERR) {
   10363 				/* Repeat for some time before giving up. */
   10364 				continue;
   10365 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10366 				break;
   10367 		}
   10368 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10369 
   10370 	return error;
   10371 }
   10372 
   10373 /******************************************************************************
   10374  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10375  *
   10376  * sc - pointer to wm_hw structure
   10377  * index - The index of the byte to read.
   10378  * data - Pointer to a byte to store the value read.
   10379  *****************************************************************************/
   10380 static int32_t
   10381 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10382 {
   10383 	int32_t status;
   10384 	uint32_t word = 0;
   10385 
   10386 	status = wm_read_ich8_data(sc, index, 1, &word);
   10387 	if (status == 0)
   10388 		*data = (uint8_t)word;
   10389 	else
   10390 		*data = 0;
   10391 
   10392 	return status;
   10393 }
   10394 
   10395 /******************************************************************************
   10396  * Reads a word from the NVM using the ICH8 flash access registers.
   10397  *
   10398  * sc - pointer to wm_hw structure
   10399  * index - The starting byte index of the word to read.
   10400  * data - Pointer to a word to store the value read.
   10401  *****************************************************************************/
   10402 static int32_t
   10403 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10404 {
   10405 	int32_t status;
   10406 	uint32_t word = 0;
   10407 
   10408 	status = wm_read_ich8_data(sc, index, 2, &word);
   10409 	if (status == 0)
   10410 		*data = (uint16_t)word;
   10411 	else
   10412 		*data = 0;
   10413 
   10414 	return status;
   10415 }
   10416 
   10417 /******************************************************************************
   10418  * Reads a dword from the NVM using the ICH8 flash access registers.
   10419  *
   10420  * sc - pointer to wm_hw structure
   10421  * index - The starting byte index of the word to read.
   10422  * data - Pointer to a word to store the value read.
   10423  *****************************************************************************/
   10424 static int32_t
   10425 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10426 {
   10427 	int32_t status;
   10428 
   10429 	status = wm_read_ich8_data(sc, index, 4, data);
   10430 	return status;
   10431 }
   10432 
   10433 /******************************************************************************
   10434  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10435  * register.
   10436  *
   10437  * sc - Struct containing variables accessed by shared code
   10438  * offset - offset of word in the EEPROM to read
   10439  * data - word read from the EEPROM
   10440  * words - number of words to read
   10441  *****************************************************************************/
   10442 static int
   10443 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10444 {
   10445 	int32_t  error = 0;
   10446 	uint32_t flash_bank = 0;
   10447 	uint32_t act_offset = 0;
   10448 	uint32_t bank_offset = 0;
   10449 	uint16_t word = 0;
   10450 	uint16_t i = 0;
   10451 
   10452 	/*
   10453 	 * We need to know which is the valid flash bank.  In the event
   10454 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10455 	 * managing flash_bank.  So it cannot be trusted and needs
   10456 	 * to be updated with each read.
   10457 	 */
   10458 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10459 	if (error) {
   10460 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10461 			device_xname(sc->sc_dev)));
   10462 		flash_bank = 0;
   10463 	}
   10464 
   10465 	/*
   10466 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10467 	 * size
   10468 	 */
   10469 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10470 
   10471 	error = wm_get_swfwhw_semaphore(sc);
   10472 	if (error) {
   10473 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10474 		    __func__);
   10475 		return error;
   10476 	}
   10477 
   10478 	for (i = 0; i < words; i++) {
   10479 		/* The NVM part needs a byte offset, hence * 2 */
   10480 		act_offset = bank_offset + ((offset + i) * 2);
   10481 		error = wm_read_ich8_word(sc, act_offset, &word);
   10482 		if (error) {
   10483 			aprint_error_dev(sc->sc_dev,
   10484 			    "%s: failed to read NVM\n", __func__);
   10485 			break;
   10486 		}
   10487 		data[i] = word;
   10488 	}
   10489 
   10490 	wm_put_swfwhw_semaphore(sc);
   10491 	return error;
   10492 }
   10493 
   10494 /******************************************************************************
   10495  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10496  * register.
   10497  *
   10498  * sc - Struct containing variables accessed by shared code
   10499  * offset - offset of word in the EEPROM to read
   10500  * data - word read from the EEPROM
   10501  * words - number of words to read
   10502  *****************************************************************************/
   10503 static int
   10504 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10505 {
   10506 	int32_t  error = 0;
   10507 	uint32_t flash_bank = 0;
   10508 	uint32_t act_offset = 0;
   10509 	uint32_t bank_offset = 0;
   10510 	uint32_t dword = 0;
   10511 	uint16_t i = 0;
   10512 
   10513 	/*
   10514 	 * We need to know which is the valid flash bank.  In the event
   10515 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10516 	 * managing flash_bank.  So it cannot be trusted and needs
   10517 	 * to be updated with each read.
   10518 	 */
   10519 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10520 	if (error) {
   10521 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10522 			device_xname(sc->sc_dev)));
   10523 		flash_bank = 0;
   10524 	}
   10525 
   10526 	/*
   10527 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10528 	 * size
   10529 	 */
   10530 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10531 
   10532 	error = wm_get_swfwhw_semaphore(sc);
   10533 	if (error) {
   10534 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10535 		    __func__);
   10536 		return error;
   10537 	}
   10538 
   10539 	for (i = 0; i < words; i++) {
   10540 		/* The NVM part needs a byte offset, hence * 2 */
   10541 		act_offset = bank_offset + ((offset + i) * 2);
   10542 		/* but we must read dword aligned, so mask ... */
   10543 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10544 		if (error) {
   10545 			aprint_error_dev(sc->sc_dev,
   10546 			    "%s: failed to read NVM\n", __func__);
   10547 			break;
   10548 		}
   10549 		/* ... and pick out low or high word */
   10550 		if ((act_offset & 0x2) == 0)
   10551 			data[i] = (uint16_t)(dword & 0xFFFF);
   10552 		else
   10553 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10554 	}
   10555 
   10556 	wm_put_swfwhw_semaphore(sc);
   10557 	return error;
   10558 }
   10559 
   10560 /* iNVM */
   10561 
   10562 static int
   10563 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10564 {
    10565 	int32_t rv = -1;
   10566 	uint32_t invm_dword;
   10567 	uint16_t i;
   10568 	uint8_t record_type, word_address;
   10569 
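	/* Walk the iNVM records, skipping over autoload and RSA key blobs. */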
   10570 	for (i = 0; i < INVM_SIZE; i++) {
   10571 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10572 		/* Get record type */
   10573 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10574 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10575 			break;
   10576 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10577 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10578 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10579 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10580 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10581 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10582 			if (word_address == address) {
   10583 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10584 				rv = 0;
   10585 				break;
   10586 			}
   10587 		}
   10588 	}
   10589 
   10590 	return rv;
   10591 }
   10592 
   10593 static int
   10594 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10595 {
   10596 	int rv = 0;
   10597 	int i;
   10598 
   10599 	for (i = 0; i < words; i++) {
   10600 		switch (offset + i) {
   10601 		case NVM_OFF_MACADDR:
   10602 		case NVM_OFF_MACADDR1:
   10603 		case NVM_OFF_MACADDR2:
   10604 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10605 			if (rv != 0) {
   10606 				data[i] = 0xffff;
   10607 				rv = -1;
   10608 			}
   10609 			break;
   10610 		case NVM_OFF_CFG2:
   10611 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10612 			if (rv != 0) {
   10613 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10614 				rv = 0;
   10615 			}
   10616 			break;
   10617 		case NVM_OFF_CFG4:
   10618 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10619 			if (rv != 0) {
   10620 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10621 				rv = 0;
   10622 			}
   10623 			break;
   10624 		case NVM_OFF_LED_1_CFG:
   10625 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10626 			if (rv != 0) {
   10627 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10628 				rv = 0;
   10629 			}
   10630 			break;
   10631 		case NVM_OFF_LED_0_2_CFG:
   10632 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10633 			if (rv != 0) {
   10634 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10635 				rv = 0;
   10636 			}
   10637 			break;
   10638 		case NVM_OFF_ID_LED_SETTINGS:
   10639 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10640 			if (rv != 0) {
   10641 				*data = ID_LED_RESERVED_FFFF;
   10642 				rv = 0;
   10643 			}
   10644 			break;
   10645 		default:
   10646 			DPRINTF(WM_DEBUG_NVM,
   10647 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10648 			*data = NVM_RESERVED_WORD;
   10649 			break;
   10650 		}
   10651 	}
   10652 
   10653 	return rv;
   10654 }
   10655 
   10656 /* Lock, detecting NVM type, validate checksum, version and read */
   10657 
   10658 /*
   10659  * wm_nvm_acquire:
   10660  *
   10661  *	Perform the EEPROM handshake required on some chips.
   10662  */
   10663 static int
   10664 wm_nvm_acquire(struct wm_softc *sc)
   10665 {
   10666 	uint32_t reg;
   10667 	int x;
   10668 	int ret = 0;
   10669 
    10670 	/* Flash type needs no handshake; always succeeds */
   10671 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10672 		return 0;
   10673 
   10674 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10675 		ret = wm_get_swfwhw_semaphore(sc);
   10676 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10677 		/* This will also do wm_get_swsm_semaphore() if needed */
   10678 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10679 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10680 		ret = wm_get_swsm_semaphore(sc);
   10681 	}
   10682 
   10683 	if (ret) {
   10684 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10685 			__func__);
   10686 		return 1;
   10687 	}
   10688 
   10689 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10690 		reg = CSR_READ(sc, WMREG_EECD);
   10691 
   10692 		/* Request EEPROM access. */
   10693 		reg |= EECD_EE_REQ;
   10694 		CSR_WRITE(sc, WMREG_EECD, reg);
   10695 
   10696 		/* ..and wait for it to be granted. */
   10697 		for (x = 0; x < 1000; x++) {
   10698 			reg = CSR_READ(sc, WMREG_EECD);
   10699 			if (reg & EECD_EE_GNT)
   10700 				break;
   10701 			delay(5);
   10702 		}
   10703 		if ((reg & EECD_EE_GNT) == 0) {
   10704 			aprint_error_dev(sc->sc_dev,
   10705 			    "could not acquire EEPROM GNT\n");
   10706 			reg &= ~EECD_EE_REQ;
   10707 			CSR_WRITE(sc, WMREG_EECD, reg);
   10708 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10709 				wm_put_swfwhw_semaphore(sc);
   10710 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10711 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10712 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10713 				wm_put_swsm_semaphore(sc);
   10714 			return 1;
   10715 		}
   10716 	}
   10717 
   10718 	return 0;
   10719 }
   10720 
   10721 /*
   10722  * wm_nvm_release:
   10723  *
   10724  *	Release the EEPROM mutex.
   10725  */
   10726 static void
   10727 wm_nvm_release(struct wm_softc *sc)
   10728 {
   10729 	uint32_t reg;
   10730 
    10731 	/* Flash type needs no handshake; nothing to release */
   10732 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10733 		return;
   10734 
   10735 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10736 		reg = CSR_READ(sc, WMREG_EECD);
   10737 		reg &= ~EECD_EE_REQ;
   10738 		CSR_WRITE(sc, WMREG_EECD, reg);
   10739 	}
   10740 
   10741 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10742 		wm_put_swfwhw_semaphore(sc);
   10743 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10744 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10745 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10746 		wm_put_swsm_semaphore(sc);
   10747 }
   10748 
   10749 static int
   10750 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10751 {
   10752 	uint32_t eecd = 0;
   10753 
   10754 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10755 	    || sc->sc_type == WM_T_82583) {
   10756 		eecd = CSR_READ(sc, WMREG_EECD);
   10757 
   10758 		/* Isolate bits 15 & 16 */
   10759 		eecd = ((eecd >> 15) & 0x03);
   10760 
   10761 		/* If both bits are set, device is Flash type */
   10762 		if (eecd == 0x03)
   10763 			return 0;
   10764 	}
   10765 	return 1;
   10766 }
   10767 
   10768 static int
   10769 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10770 {
   10771 	uint32_t eec;
   10772 
   10773 	eec = CSR_READ(sc, WMREG_EEC);
   10774 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10775 		return 1;
   10776 
   10777 	return 0;
   10778 }
   10779 
   10780 /*
   10781  * wm_nvm_validate_checksum
   10782  *
   10783  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10784  */
   10785 static int
   10786 wm_nvm_validate_checksum(struct wm_softc *sc)
   10787 {
   10788 	uint16_t checksum;
   10789 	uint16_t eeprom_data;
   10790 #ifdef WM_DEBUG
   10791 	uint16_t csum_wordaddr, valid_checksum;
   10792 #endif
   10793 	int i;
   10794 
   10795 	checksum = 0;
   10796 
   10797 	/* Don't check for I211 */
   10798 	if (sc->sc_type == WM_T_I211)
   10799 		return 0;
   10800 
   10801 #ifdef WM_DEBUG
   10802 	if (sc->sc_type == WM_T_PCH_LPT) {
   10803 		csum_wordaddr = NVM_OFF_COMPAT;
   10804 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10805 	} else {
   10806 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10807 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10808 	}
   10809 
   10810 	/* Dump EEPROM image for debug */
   10811 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10812 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10813 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10814 		/* XXX PCH_SPT? */
   10815 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10816 		if ((eeprom_data & valid_checksum) == 0) {
   10817 			DPRINTF(WM_DEBUG_NVM,
   10818 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10819 				device_xname(sc->sc_dev), eeprom_data,
   10820 				    valid_checksum));
   10821 		}
   10822 	}
   10823 
   10824 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10825 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10826 		for (i = 0; i < NVM_SIZE; i++) {
   10827 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10828 				printf("XXXX ");
   10829 			else
   10830 				printf("%04hx ", eeprom_data);
   10831 			if (i % 8 == 7)
   10832 				printf("\n");
   10833 		}
   10834 	}
   10835 
   10836 #endif /* WM_DEBUG */
   10837 
   10838 	for (i = 0; i < NVM_SIZE; i++) {
   10839 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10840 			return 1;
   10841 		checksum += eeprom_data;
   10842 	}
   10843 
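	/* A checksum mismatch is only reported; it is not treated as fatal. */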
   10844 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10845 #ifdef WM_DEBUG
   10846 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10847 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10848 #endif
   10849 	}
   10850 
   10851 	return 0;
   10852 }
   10853 
   10854 static void
   10855 wm_nvm_version_invm(struct wm_softc *sc)
   10856 {
   10857 	uint32_t dword;
   10858 
   10859 	/*
    10860 	 * Linux's code to decode the version is very strange, so we don't
    10861 	 * follow that algorithm and just use word 61 as the document
    10862 	 * describes.  It may not be perfect, though...
   10863 	 *
   10864 	 * Example:
   10865 	 *
   10866 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10867 	 */
   10868 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10869 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10870 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10871 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10872 }
   10873 
   10874 static void
   10875 wm_nvm_version(struct wm_softc *sc)
   10876 {
   10877 	uint16_t major, minor, build, patch;
   10878 	uint16_t uid0, uid1;
   10879 	uint16_t nvm_data;
   10880 	uint16_t off;
   10881 	bool check_version = false;
   10882 	bool check_optionrom = false;
   10883 	bool have_build = false;
   10884 
   10885 	/*
   10886 	 * Version format:
   10887 	 *
   10888 	 * XYYZ
   10889 	 * X0YZ
   10890 	 * X0YY
   10891 	 *
   10892 	 * Example:
   10893 	 *
   10894 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10895 	 *	82571	0x50a6	5.10.6?
   10896 	 *	82572	0x506a	5.6.10?
   10897 	 *	82572EI	0x5069	5.6.9?
   10898 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10899 	 *		0x2013	2.1.3?
    10900 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10901 	 */
   10902 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10903 	switch (sc->sc_type) {
   10904 	case WM_T_82571:
   10905 	case WM_T_82572:
   10906 	case WM_T_82574:
   10907 	case WM_T_82583:
   10908 		check_version = true;
   10909 		check_optionrom = true;
   10910 		have_build = true;
   10911 		break;
   10912 	case WM_T_82575:
   10913 	case WM_T_82576:
   10914 	case WM_T_82580:
   10915 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10916 			check_version = true;
   10917 		break;
   10918 	case WM_T_I211:
   10919 		wm_nvm_version_invm(sc);
   10920 		goto printver;
   10921 	case WM_T_I210:
   10922 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10923 			wm_nvm_version_invm(sc);
   10924 			goto printver;
   10925 		}
   10926 		/* FALLTHROUGH */
   10927 	case WM_T_I350:
   10928 	case WM_T_I354:
   10929 		check_version = true;
   10930 		check_optionrom = true;
   10931 		break;
   10932 	default:
   10933 		return;
   10934 	}
   10935 	if (check_version) {
   10936 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10937 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10938 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10939 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10940 			build = nvm_data & NVM_BUILD_MASK;
   10941 			have_build = true;
   10942 		} else
   10943 			minor = nvm_data & 0x00ff;
   10944 
    10945 		/* The minor field is BCD-encoded; convert it to decimal */
   10946 		minor = (minor / 16) * 10 + (minor % 16);
   10947 		sc->sc_nvm_ver_major = major;
   10948 		sc->sc_nvm_ver_minor = minor;
   10949 
   10950 printver:
   10951 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10952 		    sc->sc_nvm_ver_minor);
   10953 		if (have_build) {
   10954 			sc->sc_nvm_ver_build = build;
   10955 			aprint_verbose(".%d", build);
   10956 		}
   10957 	}
   10958 	if (check_optionrom) {
   10959 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10960 		/* Option ROM Version */
   10961 		if ((off != 0x0000) && (off != 0xffff)) {
   10962 			off += NVM_COMBO_VER_OFF;
   10963 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10964 			wm_nvm_read(sc, off, 1, &uid0);
   10965 			if ((uid0 != 0) && (uid0 != 0xffff)
   10966 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10967 				/* 16bits */
   10968 				major = uid0 >> 8;
   10969 				build = (uid0 << 8) | (uid1 >> 8);
   10970 				patch = uid1 & 0x00ff;
   10971 				aprint_verbose(", option ROM Version %d.%d.%d",
   10972 				    major, build, patch);
   10973 			}
   10974 		}
   10975 	}
   10976 
   10977 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10978 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10979 }
   10980 
   10981 /*
   10982  * wm_nvm_read:
   10983  *
   10984  *	Read data from the serial EEPROM.
   10985  */
   10986 static int
   10987 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10988 {
   10989 	int rv;
   10990 
   10991 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10992 		return 1;
   10993 
   10994 	if (wm_nvm_acquire(sc))
   10995 		return 1;
   10996 
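	/* Dispatch on the NVM access method the chip provides. */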
   10997 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10998 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10999 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11000 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11001 	else if (sc->sc_type == WM_T_PCH_SPT)
   11002 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11003 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11004 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11005 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11006 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11007 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11008 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11009 	else
   11010 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11011 
   11012 	wm_nvm_release(sc);
   11013 	return rv;
   11014 }
   11015 
   11016 /*
   11017  * Hardware semaphores.
    11018  * Very complex...
   11019  */
   11020 
   11021 static int
   11022 wm_get_swsm_semaphore(struct wm_softc *sc)
   11023 {
   11024 	int32_t timeout;
   11025 	uint32_t swsm;
   11026 
   11027 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11028 		/* Get the SW semaphore. */
   11029 		timeout = sc->sc_nvm_wordsize + 1;
   11030 		while (timeout) {
   11031 			swsm = CSR_READ(sc, WMREG_SWSM);
   11032 
   11033 			if ((swsm & SWSM_SMBI) == 0)
   11034 				break;
   11035 
   11036 			delay(50);
   11037 			timeout--;
   11038 		}
   11039 
   11040 		if (timeout == 0) {
   11041 			aprint_error_dev(sc->sc_dev,
   11042 			    "could not acquire SWSM SMBI\n");
   11043 			return 1;
   11044 		}
   11045 	}
   11046 
   11047 	/* Get the FW semaphore. */
   11048 	timeout = sc->sc_nvm_wordsize + 1;
   11049 	while (timeout) {
   11050 		swsm = CSR_READ(sc, WMREG_SWSM);
   11051 		swsm |= SWSM_SWESMBI;
   11052 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11053 		/* If we managed to set the bit we got the semaphore. */
   11054 		swsm = CSR_READ(sc, WMREG_SWSM);
   11055 		if (swsm & SWSM_SWESMBI)
   11056 			break;
   11057 
   11058 		delay(50);
   11059 		timeout--;
   11060 	}
   11061 
   11062 	if (timeout == 0) {
   11063 		aprint_error_dev(sc->sc_dev,
   11064 		    "could not acquire SWSM SWESMBI\n");
   11065 		/* Release semaphores */
   11066 		wm_put_swsm_semaphore(sc);
   11067 		return 1;
   11068 	}
   11069 	return 0;
   11070 }
   11071 
   11072 static void
   11073 wm_put_swsm_semaphore(struct wm_softc *sc)
   11074 {
   11075 	uint32_t swsm;
   11076 
   11077 	swsm = CSR_READ(sc, WMREG_SWSM);
   11078 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11079 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11080 }
   11081 
   11082 static int
   11083 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11084 {
   11085 	uint32_t swfw_sync;
   11086 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11087 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11088 	int timeout;
   11089 
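	/* Retry for up to ~1s (200 * 5ms) until both SW and FW bits are clear. */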
   11090 	for (timeout = 0; timeout < 200; timeout++) {
   11091 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11092 			if (wm_get_swsm_semaphore(sc)) {
   11093 				aprint_error_dev(sc->sc_dev,
   11094 				    "%s: failed to get semaphore\n",
   11095 				    __func__);
   11096 				return 1;
   11097 			}
   11098 		}
   11099 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11100 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11101 			swfw_sync |= swmask;
   11102 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11103 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11104 				wm_put_swsm_semaphore(sc);
   11105 			return 0;
   11106 		}
   11107 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11108 			wm_put_swsm_semaphore(sc);
   11109 		delay(5000);
   11110 	}
   11111 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11112 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11113 	return 1;
   11114 }
   11115 
   11116 static void
   11117 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11118 {
   11119 	uint32_t swfw_sync;
   11120 
   11121 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11122 		while (wm_get_swsm_semaphore(sc) != 0)
   11123 			continue;
   11124 	}
   11125 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11126 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11127 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11128 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11129 		wm_put_swsm_semaphore(sc);
   11130 }
   11131 
   11132 static int
   11133 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11134 {
   11135 	uint32_t ext_ctrl;
    11136 	int timeout;
   11137 
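	/*
	 * Set the SW ownership bit, then read it back; we own the semaphore
	 * only if the bit sticks.
	 */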
   11138 	for (timeout = 0; timeout < 200; timeout++) {
   11139 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11140 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11141 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11142 
   11143 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11144 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11145 			return 0;
   11146 		delay(5000);
   11147 	}
   11148 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11149 	    device_xname(sc->sc_dev), ext_ctrl);
   11150 	return 1;
   11151 }
   11152 
   11153 static void
   11154 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11155 {
   11156 	uint32_t ext_ctrl;
   11157 
   11158 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11159 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11160 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11161 }
   11162 
   11163 static int
   11164 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11165 {
   11166 	int i = 0;
   11167 	uint32_t reg;
   11168 
   11169 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11170 	do {
   11171 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11172 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11173 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11174 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11175 			break;
   11176 		delay(2*1000);
   11177 		i++;
   11178 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11179 
   11180 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11181 		wm_put_hw_semaphore_82573(sc);
   11182 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11183 		    device_xname(sc->sc_dev));
   11184 		return -1;
   11185 	}
   11186 
   11187 	return 0;
   11188 }
   11189 
   11190 static void
   11191 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11192 {
   11193 	uint32_t reg;
   11194 
   11195 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11196 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11197 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11198 }
   11199 
   11200 /*
   11201  * Management mode and power management related subroutines.
   11202  * BMC, AMT, suspend/resume and EEE.
   11203  */
   11204 
   11205 #ifdef WM_WOL
   11206 static int
   11207 wm_check_mng_mode(struct wm_softc *sc)
   11208 {
   11209 	int rv;
   11210 
   11211 	switch (sc->sc_type) {
   11212 	case WM_T_ICH8:
   11213 	case WM_T_ICH9:
   11214 	case WM_T_ICH10:
   11215 	case WM_T_PCH:
   11216 	case WM_T_PCH2:
   11217 	case WM_T_PCH_LPT:
   11218 	case WM_T_PCH_SPT:
   11219 		rv = wm_check_mng_mode_ich8lan(sc);
   11220 		break;
   11221 	case WM_T_82574:
   11222 	case WM_T_82583:
   11223 		rv = wm_check_mng_mode_82574(sc);
   11224 		break;
   11225 	case WM_T_82571:
   11226 	case WM_T_82572:
   11227 	case WM_T_82573:
   11228 	case WM_T_80003:
   11229 		rv = wm_check_mng_mode_generic(sc);
   11230 		break;
   11231 	default:
    11232 		/* nothing to do */
   11233 		rv = 0;
   11234 		break;
   11235 	}
   11236 
   11237 	return rv;
   11238 }
   11239 
   11240 static int
   11241 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11242 {
   11243 	uint32_t fwsm;
   11244 
   11245 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11246 
   11247 	if (((fwsm & FWSM_FW_VALID) != 0)
   11248 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11249 		return 1;
   11250 
   11251 	return 0;
   11252 }
   11253 
   11254 static int
   11255 wm_check_mng_mode_82574(struct wm_softc *sc)
   11256 {
   11257 	uint16_t data;
   11258 
   11259 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11260 
   11261 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11262 		return 1;
   11263 
   11264 	return 0;
   11265 }
   11266 
   11267 static int
   11268 wm_check_mng_mode_generic(struct wm_softc *sc)
   11269 {
   11270 	uint32_t fwsm;
   11271 
   11272 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11273 
   11274 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11275 		return 1;
   11276 
   11277 	return 0;
   11278 }
   11279 #endif /* WM_WOL */
   11280 
   11281 static int
   11282 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11283 {
   11284 	uint32_t manc, fwsm, factps;
   11285 
   11286 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11287 		return 0;
   11288 
   11289 	manc = CSR_READ(sc, WMREG_MANC);
   11290 
   11291 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11292 		device_xname(sc->sc_dev), manc));
   11293 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11294 		return 0;
   11295 
   11296 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11297 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11298 		factps = CSR_READ(sc, WMREG_FACTPS);
   11299 		if (((factps & FACTPS_MNGCG) == 0)
   11300 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11301 			return 1;
    11302 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11303 		uint16_t data;
   11304 
   11305 		factps = CSR_READ(sc, WMREG_FACTPS);
   11306 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11307 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11308 			device_xname(sc->sc_dev), factps, data));
   11309 		if (((factps & FACTPS_MNGCG) == 0)
   11310 		    && ((data & NVM_CFG2_MNGM_MASK)
   11311 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11312 			return 1;
   11313 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11314 	    && ((manc & MANC_ASF_EN) == 0))
   11315 		return 1;
   11316 
   11317 	return 0;
   11318 }
   11319 
   11320 static bool
   11321 wm_phy_resetisblocked(struct wm_softc *sc)
   11322 {
   11323 	bool blocked = false;
   11324 	uint32_t reg;
   11325 	int i = 0;
   11326 
   11327 	switch (sc->sc_type) {
   11328 	case WM_T_ICH8:
   11329 	case WM_T_ICH9:
   11330 	case WM_T_ICH10:
   11331 	case WM_T_PCH:
   11332 	case WM_T_PCH2:
   11333 	case WM_T_PCH_LPT:
   11334 	case WM_T_PCH_SPT:
   11335 		do {
   11336 			reg = CSR_READ(sc, WMREG_FWSM);
   11337 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11338 				blocked = true;
   11339 				delay(10*1000);
   11340 				continue;
   11341 			}
   11342 			blocked = false;
   11343 		} while (blocked && (i++ < 10));
   11344 		return blocked;
   11346 	case WM_T_82571:
   11347 	case WM_T_82572:
   11348 	case WM_T_82573:
   11349 	case WM_T_82574:
   11350 	case WM_T_82583:
   11351 	case WM_T_80003:
   11352 		reg = CSR_READ(sc, WMREG_MANC);
   11353 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11354 			return true;
   11355 		else
   11356 			return false;
   11358 	default:
   11359 		/* no problem */
   11360 		break;
   11361 	}
   11362 
   11363 	return false;
   11364 }
   11365 
   11366 static void
   11367 wm_get_hw_control(struct wm_softc *sc)
   11368 {
   11369 	uint32_t reg;
   11370 
   11371 	switch (sc->sc_type) {
   11372 	case WM_T_82573:
   11373 		reg = CSR_READ(sc, WMREG_SWSM);
   11374 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11375 		break;
   11376 	case WM_T_82571:
   11377 	case WM_T_82572:
   11378 	case WM_T_82574:
   11379 	case WM_T_82583:
   11380 	case WM_T_80003:
   11381 	case WM_T_ICH8:
   11382 	case WM_T_ICH9:
   11383 	case WM_T_ICH10:
   11384 	case WM_T_PCH:
   11385 	case WM_T_PCH2:
   11386 	case WM_T_PCH_LPT:
   11387 	case WM_T_PCH_SPT:
   11388 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11389 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11390 		break;
   11391 	default:
   11392 		break;
   11393 	}
   11394 }
   11395 
   11396 static void
   11397 wm_release_hw_control(struct wm_softc *sc)
   11398 {
   11399 	uint32_t reg;
   11400 
   11401 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11402 		return;
   11403 
   11404 	if (sc->sc_type == WM_T_82573) {
   11405 		reg = CSR_READ(sc, WMREG_SWSM);
   11406 		reg &= ~SWSM_DRV_LOAD;
    11407 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11408 	} else {
   11409 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11410 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11411 	}
   11412 }
   11413 
   11414 static void
   11415 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11416 {
   11417 	uint32_t reg;
   11418 
   11419 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11420 
   11421 	if (gate)
   11422 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11423 	else
   11424 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11425 
   11426 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11427 }
   11428 
   11429 static void
   11430 wm_smbustopci(struct wm_softc *sc)
   11431 {
   11432 	uint32_t fwsm;
   11433 
   11434 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11435 	if (((fwsm & FWSM_FW_VALID) == 0)
    11436 	    && (wm_phy_resetisblocked(sc) == false)) {
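		/* Toggle LANPHYPC to force the PHY out of SMBus mode. */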
   11437 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11438 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11439 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11440 		CSR_WRITE_FLUSH(sc);
   11441 		delay(10);
   11442 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11443 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11444 		CSR_WRITE_FLUSH(sc);
   11445 		delay(50*1000);
   11446 
   11447 		/*
   11448 		 * Gate automatic PHY configuration by hardware on non-managed
   11449 		 * 82579
   11450 		 */
   11451 		if (sc->sc_type == WM_T_PCH2)
   11452 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11453 	}
   11454 }
   11455 
   11456 static void
   11457 wm_init_manageability(struct wm_softc *sc)
   11458 {
   11459 
   11460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11461 		device_xname(sc->sc_dev), __func__));
   11462 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11463 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11464 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11465 
   11466 		/* Disable hardware interception of ARP */
   11467 		manc &= ~MANC_ARP_EN;
   11468 
   11469 		/* Enable receiving management packets to the host */
   11470 		if (sc->sc_type >= WM_T_82571) {
   11471 			manc |= MANC_EN_MNG2HOST;
    11472 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11473 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11474 		}
   11475 
   11476 		CSR_WRITE(sc, WMREG_MANC, manc);
   11477 	}
   11478 }
   11479 
   11480 static void
   11481 wm_release_manageability(struct wm_softc *sc)
   11482 {
   11483 
   11484 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11485 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11486 
   11487 		manc |= MANC_ARP_EN;
   11488 		if (sc->sc_type >= WM_T_82571)
   11489 			manc &= ~MANC_EN_MNG2HOST;
   11490 
   11491 		CSR_WRITE(sc, WMREG_MANC, manc);
   11492 	}
   11493 }
   11494 
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags are set later, after the EEPROM/NVM
	 * handling has been reset.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

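/*
 * wm_enable_wakeup:
 *
 *	Program the wakeup registers (WUC/WUFC) for magic packet wake
 *	and enable PME assertion in the PCI power management capability.
 */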
static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg;
	pcireg_t pmode;
	int pmreg;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

/* LPLU */

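/*
 * wm_lplu_d0_disable:
 *
 *	Disable Low Power Link Up (LPLU) in the D0 power state so that
 *	the link can run at full (gigabit) speed while the system is up.
 */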
static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
}

static void
wm_lplu_d0_disable_pch(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
	reg |= HV_OEM_BITS_ANEGNOW;
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
}

/* EEE */

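/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (EEE) on I350-class
 *	chips: IPCNFG selects per-speed EEE autonegotiation, and EEER
 *	turns the Low Power Idle (LPI) machinery on or off.
 */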
static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
#if 0
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link speed is not 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
#endif
}

/*
 * Workaround for the "WOL from S5 stops working" issue (gig downshift):
 * toggle the Kumeran near-end loopback bit.
 */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

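/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 is a power-saving state of the Kumeran MAC-PHY interconnect
 *	that must not be used while the link is running at 1Gbps, so
 *	disable K1 whenever "link" says a gigabit link may be up.
 */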
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

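/*
 * wm_set_mdio_slow_mode_hv:
 *
 *	Put the Kumeran MDIO interface into slow mode; the 82577 PHY
 *	needs this for reliable register access.
 */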
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

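/*
 * wm_configure_k1_ich8lan:
 *
 *	Set or clear the Kumeran K1 enable bit. The MAC speed is
 *	temporarily forced (FRCSPD/SPD_BYPS) so that the new setting
 *	takes effect while MAC-PHY communication is quiesced.
 */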
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

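/*
 * wm_reset_mdicnfg_82580:
 *
 *	After reset, restore the MDICNFG destination and shared-MDIO
 *	bits from the NVM CFG3 word (SGMII mode only).
 */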
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10:
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int pmreg, i;

	/*
	 * Get the Power Management cap offset first; bail out before
	 * touching any registers if it is missing.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

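		/*
		 * Bounce the device through D3hot and back to D0; per
		 * the errata workaround this makes the temporary
		 * autoload value written above take effect so that the
		 * PHY PLL gets reconfigured.
		 */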
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}