Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.155
      1 /*	$NetBSD: if_wm.c,v 1.155 2008/02/23 06:12:30 rafal Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Rework how parameters are loaded from the EEPROM.
     76  *	- Figure out what to do with the i82545GM and i82546GB
     77  *	  SERDES controllers.
     78  *	- Fix hw VLAN assist.
     79  */
     80 
     81 #include <sys/cdefs.h>
     82 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.155 2008/02/23 06:12:30 rafal Exp $");
     83 
     84 #include "bpfilter.h"
     85 #include "rnd.h"
     86 
     87 #include <sys/param.h>
     88 #include <sys/systm.h>
     89 #include <sys/callout.h>
     90 #include <sys/mbuf.h>
     91 #include <sys/malloc.h>
     92 #include <sys/kernel.h>
     93 #include <sys/socket.h>
     94 #include <sys/ioctl.h>
     95 #include <sys/errno.h>
     96 #include <sys/device.h>
     97 #include <sys/queue.h>
     98 #include <sys/syslog.h>
     99 
    100 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
    101 
    102 #if NRND > 0
    103 #include <sys/rnd.h>
    104 #endif
    105 
    106 #include <net/if.h>
    107 #include <net/if_dl.h>
    108 #include <net/if_media.h>
    109 #include <net/if_ether.h>
    110 
    111 #if NBPFILTER > 0
    112 #include <net/bpf.h>
    113 #endif
    114 
    115 #include <netinet/in.h>			/* XXX for struct ip */
    116 #include <netinet/in_systm.h>		/* XXX for struct ip */
    117 #include <netinet/ip.h>			/* XXX for struct ip */
    118 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    119 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    120 
    121 #include <sys/bus.h>
    122 #include <sys/intr.h>
    123 #include <machine/endian.h>
    124 
    125 #include <dev/mii/mii.h>
    126 #include <dev/mii/miivar.h>
    127 #include <dev/mii/mii_bitbang.h>
    128 #include <dev/mii/ikphyreg.h>
    129 
    130 #include <dev/pci/pcireg.h>
    131 #include <dev/pci/pcivar.h>
    132 #include <dev/pci/pcidevs.h>
    133 
    134 #include <dev/pci/if_wmreg.h>
    135 
     136 #ifdef WM_DEBUG	/* per-category debug printing, gated by the wm_debug bit mask */
     137 #define	WM_DEBUG_LINK		0x01
     138 #define	WM_DEBUG_TX		0x02
     139 #define	WM_DEBUG_RX		0x04
     140 #define	WM_DEBUG_GMII		0x08
     141 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
     142 
     143 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y	/* NOTE(review): unguarded "if" -- dangling-else hazard at call sites; a do { } while (0) wrapper would be safer */
     144 #else
     145 #define	DPRINTF(x, y)	/* nothing */
     146 #endif /* WM_DEBUG */
    147 
    148 /*
    149  * Transmit descriptor list size.  Due to errata, we can only have
    150  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    151  * on >= 82544.  We tell the upper layers that they can queue a lot
    152  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    153  * of them at a time.
    154  *
    155  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    156  * chains containing many small mbufs have been observed in zero-copy
    157  * situations with jumbo frames.
    158  */
    159 #define	WM_NTXSEGS		256
    160 #define	WM_IFQUEUELEN		256
    161 #define	WM_TXQUEUELEN_MAX	64
    162 #define	WM_TXQUEUELEN_MAX_82547	16
    163 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
    164 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
    165 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
    166 #define	WM_NTXDESC_82542	256
    167 #define	WM_NTXDESC_82544	4096
    168 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
    169 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
    170 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
    171 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
    172 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
    173 
    174 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
    175 
    176 /*
    177  * Receive descriptor list size.  We have one Rx buffer for normal
    178  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    179  * packet.  We allocate 256 receive descriptors, each with a 2k
    180  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    181  */
    182 #define	WM_NRXDESC		256
    183 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    184 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    185 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    186 
    187 /*
    188  * Control structures are DMA'd to the i82542 chip.  We allocate them in
    189  * a single clump that maps to a single DMA segment to make several things
    190  * easier.
    191  */
     192 struct wm_control_data_82544 {	/* single-clump DMA control data; WM_CDOFF() below depends on this layout, do not reorder */
     193 	/*
     194 	 * The receive descriptors.
     195 	 */
     196 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
     197 
     198 	/*
     199 	 * The transmit descriptors.  Put these at the end, because
     200 	 * we might use a smaller number of them.
     201 	 */
     202 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
     203 };
    204 
     205 struct wm_control_data_82542 {	/* i82542 variant: same Rx ring, but only 256 Tx descriptors (chip errata) */
     206 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];	/* Rx descriptor ring */
     207 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];	/* Tx descriptor ring */
     208 };
    209 
     210 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)	/* byte offset of member x within the control-data clump */
     211 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])	/* offset of Tx descriptor x */
     212 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])	/* offset of Rx descriptor x */
    213 
    214 /*
    215  * Software state for transmit jobs.
    216  */
     217 struct wm_txsoft {	/* one per queued Tx job (packet); see sc_txsoft[] */
     218 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
     219 	bus_dmamap_t txs_dmamap;	/* our DMA map */
     220 	int txs_firstdesc;		/* first descriptor in packet */
     221 	int txs_lastdesc;		/* last descriptor in packet */
     222 	int txs_ndesc;			/* # of descriptors used */
     223 };
    224 
    225 /*
    226  * Software state for receive buffers.  Each descriptor gets a
    227  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    228  * more than one buffer, we chain them together.
    229  */
     230 struct wm_rxsoft {	/* one per Rx descriptor; see sc_rxsoft[] and WM_INIT_RXDESC */
     231 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
     232 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
     233 };
    234 
     235 typedef enum {	/* chip generations, roughly oldest to newest; stored in sc_type */
     236 	WM_T_unknown		= 0,
     237 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
     238 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
     239 	WM_T_82543,			/* i82543 */
     240 	WM_T_82544,			/* i82544 */
     241 	WM_T_82540,			/* i82540 */
     242 	WM_T_82545,			/* i82545 */
     243 	WM_T_82545_3,			/* i82545 3.0+ */
     244 	WM_T_82546,			/* i82546 */
     245 	WM_T_82546_3,			/* i82546 3.0+ */
     246 	WM_T_82541,			/* i82541 */
     247 	WM_T_82541_2,			/* i82541 2.0+ */
     248 	WM_T_82547,			/* i82547 */
     249 	WM_T_82547_2,			/* i82547 2.0+ */
     250 	WM_T_82571,			/* i82571 */
     251 	WM_T_82572,			/* i82572 */
     252 	WM_T_82573,			/* i82573 */
     253 	WM_T_80003,			/* i80003 */
     254 	WM_T_ICH8,			/* ICH8 LAN */
     255 	WM_T_ICH9,			/* ICH9 LAN */
     256 } wm_chip_type;
    257 
    258 /*
    259  * Software state per device.
    260  */
     261 struct wm_softc {
     262 	struct device sc_dev;		/* generic device information */
     263 	bus_space_tag_t sc_st;		/* bus space tag */
     264 	bus_space_handle_t sc_sh;	/* bus space handle */
     265 	bus_space_tag_t sc_iot;		/* I/O space tag */
     266 	bus_space_handle_t sc_ioh;	/* I/O space handle */
     267 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
     268 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
     269 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
     270 	struct ethercom sc_ethercom;	/* ethernet common data */
     271 	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
     272 	pcitag_t sc_pcitag;		/* PCI device tag */
     273 
     274 	wm_chip_type sc_type;		/* chip type */
     275 	int sc_flags;			/* flags; see below */
     276 	int sc_bus_speed;		/* PCI/PCIX bus speed */
     277 	int sc_pcix_offset;		/* PCIX capability register offset */
     278 	int sc_flowflags;		/* 802.3x flow control flags */
     279 
     280 	void *sc_ih;			/* interrupt cookie */
     281 
     282 	int sc_ee_addrbits;		/* EEPROM address bits */
     283 
     284 	struct mii_data sc_mii;		/* MII/media information */
     285 
     286 	callout_t sc_tick_ch;		/* tick callout */
     287 
     288 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
     289 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
     290 
     291 	int		sc_align_tweak;	/* Rx payload alignment offset (2, or 0 when buffers would overflow); see WM_INIT_RXDESC */
     292 
     293 	/*
     294 	 * Software state for the transmit and receive descriptors.
     295 	 */
     296 	int			sc_txnum;	/* must be a power of two */
     297 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
     298 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
     299 
     300 	/*
     301 	 * Control data structures.
     302 	 */
     303 	int			sc_ntxdesc;	/* must be a power of two */
     304 	struct wm_control_data_82544 *sc_control_data;
     305 #define	sc_txdescs	sc_control_data->wcd_txdescs
     306 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
     307 
     308 #ifdef WM_EVENT_COUNTERS
     309 	/* Event counters. */
     310 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
     311 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
     312 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
     313 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
     314 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
     315 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
     316 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
     317 
     318 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
     319 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
     320 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
     321 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
     322 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
     323 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
     324 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
     325 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
     326 
     327 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
     328 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
     329 
     330 	struct evcnt sc_ev_tu;		/* Tx underrun */
     331 
     332 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
     333 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
     334 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
     335 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
     336 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
     337 #endif /* WM_EVENT_COUNTERS */
     338 
     339 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
     340 
     341 	int	sc_txfree;		/* number of free Tx descriptors */
     342 	int	sc_txnext;		/* next ready Tx descriptor */
     343 
     344 	int	sc_txsfree;		/* number of free Tx jobs */
     345 	int	sc_txsnext;		/* next free Tx job */
     346 	int	sc_txsdirty;		/* dirty Tx jobs */
     347 
     348 	/* These 5 variables are used only on the 82547. */
     349 	int	sc_txfifo_size;		/* Tx FIFO size */
     350 	int	sc_txfifo_head;		/* current head of FIFO */
     351 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
     352 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
     353 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
     354 
     355 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
     356 
     357 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
     358 	int	sc_rxdiscard;		/* NOTE(review): presumably "discard rest of current Rx packet" -- verify in wm_rxintr */
     359 	int	sc_rxlen;		/* length of current Rx chain (see WM_RXCHAIN_*) */
     360 	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
     361 	struct mbuf *sc_rxtail;		/* tail of in-progress Rx mbuf chain */
     362 	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */
     363 
     364 	uint32_t sc_ctrl;		/* prototype CTRL register */
     365 #if 0
     366 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
     367 #endif
     368 	uint32_t sc_icr;		/* prototype interrupt bits */
     369 	uint32_t sc_itr;		/* prototype intr throttling reg */
     370 	uint32_t sc_tctl;		/* prototype TCTL register */
     371 	uint32_t sc_rctl;		/* prototype RCTL register */
     372 	uint32_t sc_txcw;		/* prototype TXCW register */
     373 	uint32_t sc_tipg;		/* prototype TIPG register */
     374 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
     375 	uint32_t sc_pba;		/* prototype PBA register */
     376 
     377 	int sc_tbi_linkup;		/* TBI link status */
     378 	int sc_tbi_anstate;		/* autonegotiation state */
     379 
     380 	int sc_mchash_type;		/* multicast filter offset */
     381 
     382 #if NRND > 0
     383 	rndsource_element_t rnd_source;	/* random source */
     384 #endif
     385 	int sc_ich8_flash_base;		/* ICH8/9: presumably NVM base within flash -- verify in wm_read_eeprom_ich8 */
     386 	int sc_ich8_flash_bank_size;	/* ICH8/9: presumably flash bank size -- verify in wm_read_eeprom_ich8 */
     387 };
    388 
     389 #define	WM_RXCHAIN_RESET(sc)						\
     390 do {									\
     391 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
     392 	*(sc)->sc_rxtailp = NULL;					\
     393 	(sc)->sc_rxlen = 0;						\
     394 } while (/*CONSTCOND*/0)	/* Empty the Rx mbuf chain: sc_rxhead becomes NULL (via tailp), length zeroed. */
    395 
     396 #define	WM_RXCHAIN_LINK(sc, m)						\
     397 do {									\
     398 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
     399 	(sc)->sc_rxtailp = &(m)->m_next;				\
     400 } while (/*CONSTCOND*/0)	/* Append mbuf m to the Rx chain and advance the tail link pointer. */
    401 
    402 /* sc_flags */
    403 #define	WM_F_HAS_MII		0x0001	/* has MII */
    404 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
    405 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
    406 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
    407 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
    408 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
    409 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
    410 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
    411 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
    412 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
    413 #define	WM_F_CSA		0x0400	/* bus is CSA */
    414 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
    415 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
    416 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware synchronisation */
    417 
     418 #ifdef WM_EVENT_COUNTERS	/* event-counter helpers; compile to nothing when counters are disabled */
     419 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
     420 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
     421 #else
     422 #define	WM_EVCNT_INCR(ev)	/* nothing */
     423 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
     424 #endif
    425 
     426 #define	CSR_READ(sc, reg)						\
     427 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
     428 #define	CSR_WRITE(sc, reg, val)						\
     429 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
     430 #define	CSR_WRITE_FLUSH(sc)						\
     431 	(void) CSR_READ((sc), WMREG_STATUS)	/* discarded STATUS read; per the name, flushes posted writes */
     432 
     433 #define ICH8_FLASH_READ32(sc, reg) \
     434 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))	/* ICH8/9 flash-window accessors */
     435 #define ICH8_FLASH_WRITE32(sc, reg, data) \
     436 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
     437 
     438 #define ICH8_FLASH_READ16(sc, reg) \
     439 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
     440 #define ICH8_FLASH_WRITE16(sc, reg, data) \
     441 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
     442 
     443 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))	/* bus address of Tx descriptor x */
     444 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))	/* bus address of Rx descriptor x */
     445 
     446 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
     447 #define	WM_CDTXADDR_HI(sc, x)						\
     448 	(sizeof(bus_addr_t) == 8 ?					\
     449 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)	/* high 32 bits; 0 when bus_addr_t is 32-bit */
     450 
     451 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
     452 #define	WM_CDRXADDR_HI(sc, x)						\
     453 	(sizeof(bus_addr_t) == 8 ?					\
     454 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)	/* high 32 bits; 0 when bus_addr_t is 32-bit */
    455 
     456 #define	WM_CDTXSYNC(sc, x, n, ops)					\
     457 do {									\
     458 	int __x, __n;							\
     459 									\
     460 	__x = (x);							\
     461 	__n = (n);							\
     462 									\
     463 	/* If it will wrap around, sync to the end of the ring. */	\
     464 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
     465 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
     466 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
     467 		    (WM_NTXDESC(sc) - __x), (ops));			\
     468 		__n -= (WM_NTXDESC(sc) - __x);				\
     469 		__x = 0;						\
     470 	}								\
     471 									\
     472 	/* Now sync whatever is left. */				\
     473 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
     474 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
     475 } while (/*CONSTCOND*/0)	/* Sync n Tx descriptors starting at ring index x, splitting the sync at ring wrap. */
    476 
     477 #define	WM_CDRXSYNC(sc, x, ops)						\
     478 do {									\
     479 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
     480 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
     481 } while (/*CONSTCOND*/0)	/* Sync the single Rx descriptor at ring index x. */
    482 
     483 #define	WM_INIT_RXDESC(sc, x)						\
     484 do {									\
     485 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
     486 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
     487 	struct mbuf *__m = __rxs->rxs_mbuf;				\
     488 									\
     489 	/*								\
     490 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
     491 	 * so that the payload after the Ethernet header is aligned	\
     492 	 * to a 4-byte boundary.					\
     493 	 *								\
     494 	 * XXX BRAINDAMAGE ALERT!					\
     495 	 * The stupid chip uses the same size for every buffer, which	\
     496 	 * is set in the Receive Control register.  We are using the 2K	\
     497 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
     498 	 * reason, we can't "scoot" packets longer than the standard	\
     499 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
     500 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
     501 	 * the upper layer copy the headers.				\
     502 	 */								\
     503 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
     504 									\
     505 	wm_set_dma_addr(&__rxd->wrx_addr,				\
     506 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
     507 	__rxd->wrx_len = 0;						\
     508 	__rxd->wrx_cksum = 0;						\
     509 	__rxd->wrx_status = 0;						\
     510 	__rxd->wrx_errors = 0;						\
     511 	__rxd->wrx_special = 0;						\
     512 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
     513 									\
     514 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
     515 } while (/*CONSTCOND*/0)	/* Re-arm Rx slot x: point descriptor at its mbuf, clear status fields, sync, then hand it to the chip by writing RDT. */
    516 
    517 static void	wm_start(struct ifnet *);
    518 static void	wm_watchdog(struct ifnet *);
    519 static int	wm_ioctl(struct ifnet *, u_long, void *);
    520 static int	wm_init(struct ifnet *);
    521 static void	wm_stop(struct ifnet *, int);
    522 
    523 static void	wm_reset(struct wm_softc *);
    524 static void	wm_rxdrain(struct wm_softc *);
    525 static int	wm_add_rxbuf(struct wm_softc *, int);
    526 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
    527 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
    528 static int	wm_validate_eeprom_checksum(struct wm_softc *);
    529 static void	wm_tick(void *);
    530 
    531 static void	wm_set_filter(struct wm_softc *);
    532 
    533 static int	wm_intr(void *);
    534 static void	wm_txintr(struct wm_softc *);
    535 static void	wm_rxintr(struct wm_softc *);
    536 static void	wm_linkintr(struct wm_softc *, uint32_t);
    537 
    538 static void	wm_tbi_mediainit(struct wm_softc *);
    539 static int	wm_tbi_mediachange(struct ifnet *);
    540 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    541 
    542 static void	wm_tbi_set_linkled(struct wm_softc *);
    543 static void	wm_tbi_check_link(struct wm_softc *);
    544 
    545 static void	wm_gmii_reset(struct wm_softc *);
    546 
    547 static int	wm_gmii_i82543_readreg(struct device *, int, int);
    548 static void	wm_gmii_i82543_writereg(struct device *, int, int, int);
    549 
    550 static int	wm_gmii_i82544_readreg(struct device *, int, int);
    551 static void	wm_gmii_i82544_writereg(struct device *, int, int, int);
    552 
    553 static int	wm_gmii_i80003_readreg(struct device *, int, int);
    554 static void	wm_gmii_i80003_writereg(struct device *, int, int, int);
    555 
    556 static void	wm_gmii_statchg(struct device *);
    557 
    558 static void	wm_gmii_mediainit(struct wm_softc *);
    559 static int	wm_gmii_mediachange(struct ifnet *);
    560 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    561 
    562 static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
    563 static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
    564 
    565 static int	wm_match(struct device *, struct cfdata *, void *);
    566 static void	wm_attach(struct device *, struct device *, void *);
    567 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
    568 static void	wm_get_auto_rd_done(struct wm_softc *);
    569 static int	wm_get_swsm_semaphore(struct wm_softc *);
    570 static void	wm_put_swsm_semaphore(struct wm_softc *);
    571 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    572 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    573 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    574 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    575 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    576 
    577 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
    578 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    579 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    580 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
    581 		     uint32_t, uint16_t *);
    582 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
    583 
    584 CFATTACH_DECL(wm, sizeof(struct wm_softc),
    585     wm_match, wm_attach, NULL, NULL);
    586 
    587 static void	wm_82547_txfifo_stall(void *);
    588 
    589 /*
    590  * Devices supported by this driver.
    591  */
    592 static const struct wm_product {
    593 	pci_vendor_id_t		wmp_vendor;
    594 	pci_product_id_t	wmp_product;
    595 	const char		*wmp_name;
    596 	wm_chip_type		wmp_type;
    597 	int			wmp_flags;
    598 #define	WMP_F_1000X		0x01
    599 #define	WMP_F_1000T		0x02
    600 } wm_products[] = {
    601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    602 	  "Intel i82542 1000BASE-X Ethernet",
    603 	  WM_T_82542_2_1,	WMP_F_1000X },
    604 
    605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    606 	  "Intel i82543GC 1000BASE-X Ethernet",
    607 	  WM_T_82543,		WMP_F_1000X },
    608 
    609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    610 	  "Intel i82543GC 1000BASE-T Ethernet",
    611 	  WM_T_82543,		WMP_F_1000T },
    612 
    613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    614 	  "Intel i82544EI 1000BASE-T Ethernet",
    615 	  WM_T_82544,		WMP_F_1000T },
    616 
    617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    618 	  "Intel i82544EI 1000BASE-X Ethernet",
    619 	  WM_T_82544,		WMP_F_1000X },
    620 
    621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    622 	  "Intel i82544GC 1000BASE-T Ethernet",
    623 	  WM_T_82544,		WMP_F_1000T },
    624 
    625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    626 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    627 	  WM_T_82544,		WMP_F_1000T },
    628 
    629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    630 	  "Intel i82540EM 1000BASE-T Ethernet",
    631 	  WM_T_82540,		WMP_F_1000T },
    632 
    633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    634 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    635 	  WM_T_82540,		WMP_F_1000T },
    636 
    637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    638 	  "Intel i82540EP 1000BASE-T Ethernet",
    639 	  WM_T_82540,		WMP_F_1000T },
    640 
    641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    642 	  "Intel i82540EP 1000BASE-T Ethernet",
    643 	  WM_T_82540,		WMP_F_1000T },
    644 
    645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    646 	  "Intel i82540EP 1000BASE-T Ethernet",
    647 	  WM_T_82540,		WMP_F_1000T },
    648 
    649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    650 	  "Intel i82545EM 1000BASE-T Ethernet",
    651 	  WM_T_82545,		WMP_F_1000T },
    652 
    653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    654 	  "Intel i82545GM 1000BASE-T Ethernet",
    655 	  WM_T_82545_3,		WMP_F_1000T },
    656 
    657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    658 	  "Intel i82545GM 1000BASE-X Ethernet",
    659 	  WM_T_82545_3,		WMP_F_1000X },
    660 #if 0
    661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    662 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    663 	  WM_T_82545_3,		WMP_F_SERDES },
    664 #endif
    665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    666 	  "Intel i82546EB 1000BASE-T Ethernet",
    667 	  WM_T_82546,		WMP_F_1000T },
    668 
    669 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
    670 	  "Intel i82546EB 1000BASE-T Ethernet",
    671 	  WM_T_82546,		WMP_F_1000T },
    672 
    673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    674 	  "Intel i82545EM 1000BASE-X Ethernet",
    675 	  WM_T_82545,		WMP_F_1000X },
    676 
    677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    678 	  "Intel i82546EB 1000BASE-X Ethernet",
    679 	  WM_T_82546,		WMP_F_1000X },
    680 
    681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    682 	  "Intel i82546GB 1000BASE-T Ethernet",
    683 	  WM_T_82546_3,		WMP_F_1000T },
    684 
    685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    686 	  "Intel i82546GB 1000BASE-X Ethernet",
    687 	  WM_T_82546_3,		WMP_F_1000X },
    688 #if 0
    689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    690 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    691 	  WM_T_82546_3,		WMP_F_SERDES },
    692 #endif
    693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    694 	  "i82546GB quad-port Gigabit Ethernet",
    695 	  WM_T_82546_3,		WMP_F_1000T },
    696 
    697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    698 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    699 	  WM_T_82546_3,		WMP_F_1000T },
    700 
    701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    702 	  "Intel PRO/1000MT (82546GB)",
    703 	  WM_T_82546_3,		WMP_F_1000T },
    704 
    705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    706 	  "Intel i82541EI 1000BASE-T Ethernet",
    707 	  WM_T_82541,		WMP_F_1000T },
    708 
    709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    710 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    711 	  WM_T_82541,		WMP_F_1000T },
    712 
    713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    714 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    715 	  WM_T_82541,		WMP_F_1000T },
    716 
    717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    718 	  "Intel i82541ER 1000BASE-T Ethernet",
    719 	  WM_T_82541_2,		WMP_F_1000T },
    720 
    721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    722 	  "Intel i82541GI 1000BASE-T Ethernet",
    723 	  WM_T_82541_2,		WMP_F_1000T },
    724 
    725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    726 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    727 	  WM_T_82541_2,		WMP_F_1000T },
    728 
    729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    730 	  "Intel i82541PI 1000BASE-T Ethernet",
    731 	  WM_T_82541_2,		WMP_F_1000T },
    732 
    733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    734 	  "Intel i82547EI 1000BASE-T Ethernet",
    735 	  WM_T_82547,		WMP_F_1000T },
    736 
    737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    738 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    739 	  WM_T_82547,		WMP_F_1000T },
    740 
    741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    742 	  "Intel i82547GI 1000BASE-T Ethernet",
    743 	  WM_T_82547_2,		WMP_F_1000T },
    744 
    745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    746 	  "Intel PRO/1000 PT (82571EB)",
    747 	  WM_T_82571,		WMP_F_1000T },
    748 
    749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    750 	  "Intel PRO/1000 PF (82571EB)",
    751 	  WM_T_82571,		WMP_F_1000X },
    752 #if 0
    753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    754 	  "Intel PRO/1000 PB (82571EB)",
    755 	  WM_T_82571,		WMP_F_SERDES },
    756 #endif
    757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    758 	  "Intel PRO/1000 QT (82571EB)",
    759 	  WM_T_82571,		WMP_F_1000T },
    760 
    761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    762 	  "Intel i82572EI 1000baseT Ethernet",
    763 	  WM_T_82572,		WMP_F_1000T },
    764 
    765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    766 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    767 	  WM_T_82571,		WMP_F_1000T, },
    768 
    769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    770 	  "Intel i82572EI 1000baseX Ethernet",
    771 	  WM_T_82572,		WMP_F_1000X },
    772 #if 0
    773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    774 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    775 	  WM_T_82572,		WMP_F_SERDES },
    776 #endif
    777 
    778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    779 	  "Intel i82572EI 1000baseT Ethernet",
    780 	  WM_T_82572,		WMP_F_1000T },
    781 
    782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    783 	  "Intel i82573E",
    784 	  WM_T_82573,		WMP_F_1000T },
    785 
    786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    787 	  "Intel i82573E IAMT",
    788 	  WM_T_82573,		WMP_F_1000T },
    789 
    790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    791 	  "Intel i82573L Gigabit Ethernet",
    792 	  WM_T_82573,		WMP_F_1000T },
    793 
    794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    795 	  "i80003 dual 1000baseT Ethernet",
    796 	  WM_T_80003,		WMP_F_1000T },
    797 
    798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    799 	  "i80003 dual 1000baseX Ethernet",
    800 	  WM_T_80003,		WMP_F_1000T },
    801 #if 0
    802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
    803 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
    804 	  WM_T_80003,		WMP_F_SERDES },
    805 #endif
    806 
    807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
    808 	  "Intel i80003 1000baseT Ethernet",
    809 	  WM_T_80003,		WMP_F_1000T },
    810 #if 0
    811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
    812 	  "Intel i80003 Gigabit Ethernet (SERDES)",
    813 	  WM_T_80003,		WMP_F_SERDES },
    814 #endif
    815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
    816 	  "Intel i82801H (M_AMT) LAN Controller",
    817 	  WM_T_ICH8,		WMP_F_1000T },
    818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
    819 	  "Intel i82801H (AMT) LAN Controller",
    820 	  WM_T_ICH8,		WMP_F_1000T },
    821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
    822 	  "Intel i82801H LAN Controller",
    823 	  WM_T_ICH8,		WMP_F_1000T },
    824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
    825 	  "Intel i82801H (IFE) LAN Controller",
    826 	  WM_T_ICH8,		WMP_F_1000T },
    827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
    828 	  "Intel i82801H (M) LAN Controller",
    829 	  WM_T_ICH8,		WMP_F_1000T },
    830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
    831 	  "Intel i82801H IFE (GT) LAN Controller",
    832 	  WM_T_ICH8,		WMP_F_1000T },
    833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
    834 	  "Intel i82801H IFE (G) LAN Controller",
    835 	  WM_T_ICH8,		WMP_F_1000T },
    836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
    837 	  "82801I (AMT) LAN Controller",
    838 	  WM_T_ICH9,		WMP_F_1000T },
    839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
    840 	  "82801I LAN Controller",
    841 	  WM_T_ICH9,		WMP_F_1000T },
    842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
    843 	  "82801I (G) LAN Controller",
    844 	  WM_T_ICH9,		WMP_F_1000T },
    845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
    846 	  "82801I (GT) LAN Controller",
    847 	  WM_T_ICH9,		WMP_F_1000T },
    848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
    849 	  "82801I (C) LAN Controller",
    850 	  WM_T_ICH9,		WMP_F_1000T },
    851 	{ 0,			0,
    852 	  NULL,
    853 	  0,			0 },
    854 };
    855 
#ifdef WM_EVENT_COUNTERS
/* Buffer for the generated "txseg%d" event-counter names (filled in attach). */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
    859 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register indirectly through the I/O BAR:
 *	write the register offset at I/O-space offset 0, then read
 *	the register's value back from I/O-space offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
    869 
/*
 * wm_io_write:
 *
 *	Write a device register indirectly through the I/O BAR:
 *	write the register offset at I/O-space offset 0, then the
 *	value at I/O-space offset 4.  (Used only for chip-bug
 *	work-arounds; normal access is memory-mapped.)
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
    877 
    878 static inline void
    879 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
    880 {
    881 	wa->wa_low = htole32(v & 0xffffffffU);
    882 	if (sizeof(bus_addr_t) == 8)
    883 		wa->wa_high = htole32((uint64_t) v >> 32);
    884 	else
    885 		wa->wa_high = 0;
    886 }
    887 
    888 static const struct wm_product *
    889 wm_lookup(const struct pci_attach_args *pa)
    890 {
    891 	const struct wm_product *wmp;
    892 
    893 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    894 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    895 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    896 			return (wmp);
    897 	}
    898 	return (NULL);
    899 }
    900 
    901 static int
    902 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    903 {
    904 	struct pci_attach_args *pa = aux;
    905 
    906 	if (wm_lookup(pa) != NULL)
    907 		return (1);
    908 
    909 	return (0);
    910 }
    911 
    912 static void
    913 wm_attach(struct device *parent, struct device *self, void *aux)
    914 {
    915 	struct wm_softc *sc = (void *) self;
    916 	struct pci_attach_args *pa = aux;
    917 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    918 	pci_chipset_tag_t pc = pa->pa_pc;
    919 	pci_intr_handle_t ih;
    920 	size_t cdata_size;
    921 	const char *intrstr = NULL;
    922 	const char *eetype;
    923 	bus_space_tag_t memt;
    924 	bus_space_handle_t memh;
    925 	bus_dma_segment_t seg;
    926 	int memh_valid;
    927 	int i, rseg, error;
    928 	const struct wm_product *wmp;
    929 	prop_data_t ea;
    930 	prop_number_t pn;
    931 	uint8_t enaddr[ETHER_ADDR_LEN];
    932 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    933 	pcireg_t preg, memtype;
    934 	uint32_t reg;
    935 
    936 	callout_init(&sc->sc_tick_ch, 0);
    937 
    938 	wmp = wm_lookup(pa);
    939 	if (wmp == NULL) {
    940 		printf("\n");
    941 		panic("wm_attach: impossible");
    942 	}
    943 
    944 	sc->sc_pc = pa->pa_pc;
    945 	sc->sc_pcitag = pa->pa_tag;
    946 
    947 	if (pci_dma64_available(pa))
    948 		sc->sc_dmat = pa->pa_dmat64;
    949 	else
    950 		sc->sc_dmat = pa->pa_dmat;
    951 
    952 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    953 	aprint_naive(": Ethernet controller\n");
    954 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
    955 
    956 	sc->sc_type = wmp->wmp_type;
    957 	if (sc->sc_type < WM_T_82543) {
    958 		if (preg < 2) {
    959 			aprint_error("%s: i82542 must be at least rev. 2\n",
    960 			    sc->sc_dev.dv_xname);
    961 			return;
    962 		}
    963 		if (preg < 3)
    964 			sc->sc_type = WM_T_82542_2_0;
    965 	}
    966 
    967 	/*
    968 	 * Map the device.  All devices support memory-mapped acccess,
    969 	 * and it is really required for normal operation.
    970 	 */
    971 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    972 	switch (memtype) {
    973 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    974 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    975 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    976 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    977 		break;
    978 	default:
    979 		memh_valid = 0;
    980 	}
    981 
    982 	if (memh_valid) {
    983 		sc->sc_st = memt;
    984 		sc->sc_sh = memh;
    985 	} else {
    986 		aprint_error("%s: unable to map device registers\n",
    987 		    sc->sc_dev.dv_xname);
    988 		return;
    989 	}
    990 
    991 	/*
    992 	 * In addition, i82544 and later support I/O mapped indirect
    993 	 * register access.  It is not desirable (nor supported in
    994 	 * this driver) to use it for normal operation, though it is
    995 	 * required to work around bugs in some chip versions.
    996 	 */
    997 	if (sc->sc_type >= WM_T_82544) {
    998 		/* First we have to find the I/O BAR. */
    999 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1000 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
   1001 			    PCI_MAPREG_TYPE_IO)
   1002 				break;
   1003 		}
   1004 		if (i == PCI_MAPREG_END)
   1005 			aprint_error("%s: WARNING: unable to find I/O BAR\n",
   1006 			    sc->sc_dev.dv_xname);
   1007 		else {
   1008 			/*
   1009 			 * The i8254x doesn't apparently respond when the
   1010 			 * I/O BAR is 0, which looks somewhat like it's not
   1011 			 * been configured.
   1012 			 */
   1013 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1014 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1015 				aprint_error("%s: WARNING: I/O BAR at zero.\n",
   1016 				    sc->sc_dev.dv_xname);
   1017 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1018 					0, &sc->sc_iot, &sc->sc_ioh,
   1019 					NULL, NULL) == 0) {
   1020 				sc->sc_flags |= WM_F_IOH_VALID;
   1021 			} else {
   1022 				aprint_error("%s: WARNING: unable to map "
   1023 				    "I/O space\n", sc->sc_dev.dv_xname);
   1024 			}
   1025 		}
   1026 
   1027 	}
   1028 
   1029 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1030 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1031 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1032 	if (sc->sc_type < WM_T_82542_2_1)
   1033 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1034 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1035 
   1036 	/* power up chip */
   1037 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
   1038 	    NULL)) && error != EOPNOTSUPP) {
   1039 		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
   1040 		    error);
   1041 		return;
   1042 	}
   1043 
   1044 	/*
   1045 	 * Map and establish our interrupt.
   1046 	 */
   1047 	if (pci_intr_map(pa, &ih)) {
   1048 		aprint_error("%s: unable to map interrupt\n",
   1049 		    sc->sc_dev.dv_xname);
   1050 		return;
   1051 	}
   1052 	intrstr = pci_intr_string(pc, ih);
   1053 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
   1054 	if (sc->sc_ih == NULL) {
   1055 		aprint_error("%s: unable to establish interrupt",
   1056 		    sc->sc_dev.dv_xname);
   1057 		if (intrstr != NULL)
   1058 			aprint_normal(" at %s", intrstr);
   1059 		aprint_normal("\n");
   1060 		return;
   1061 	}
   1062 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
   1063 
   1064 	/*
   1065 	 * Determine a few things about the bus we're connected to.
   1066 	 */
   1067 	if (sc->sc_type < WM_T_82543) {
   1068 		/* We don't really know the bus characteristics here. */
   1069 		sc->sc_bus_speed = 33;
   1070 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1071 		/*
   1072 		 * CSA (Communication Streaming Architecture) is about as fast
   1073 		 * a 32-bit 66MHz PCI Bus.
   1074 		 */
   1075 		sc->sc_flags |= WM_F_CSA;
   1076 		sc->sc_bus_speed = 66;
   1077 		aprint_verbose("%s: Communication Streaming Architecture\n",
   1078 		    sc->sc_dev.dv_xname);
   1079 		if (sc->sc_type == WM_T_82547) {
   1080 			callout_init(&sc->sc_txfifo_ch, 0);
   1081 			callout_setfunc(&sc->sc_txfifo_ch,
   1082 					wm_82547_txfifo_stall, sc);
   1083 			aprint_verbose("%s: using 82547 Tx FIFO stall "
   1084 				       "work-around\n", sc->sc_dev.dv_xname);
   1085 		}
   1086 	} else if (sc->sc_type >= WM_T_82571) {
   1087 		sc->sc_flags |= WM_F_PCIE;
   1088 		if ((sc->sc_type != WM_T_ICH8) || (sc->sc_type != WM_T_ICH9))
   1089 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
   1090 		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
   1091 	} else {
   1092 		reg = CSR_READ(sc, WMREG_STATUS);
   1093 		if (reg & STATUS_BUS64)
   1094 			sc->sc_flags |= WM_F_BUS64;
   1095 		if (sc->sc_type >= WM_T_82544 &&
   1096 		    (reg & STATUS_PCIX_MODE) != 0) {
   1097 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1098 
   1099 			sc->sc_flags |= WM_F_PCIX;
   1100 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1101 					       PCI_CAP_PCIX,
   1102 					       &sc->sc_pcix_offset, NULL) == 0)
   1103 				aprint_error("%s: unable to find PCIX "
   1104 				    "capability\n", sc->sc_dev.dv_xname);
   1105 			else if (sc->sc_type != WM_T_82545_3 &&
   1106 				 sc->sc_type != WM_T_82546_3) {
   1107 				/*
   1108 				 * Work around a problem caused by the BIOS
   1109 				 * setting the max memory read byte count
   1110 				 * incorrectly.
   1111 				 */
   1112 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1113 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
   1114 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1115 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
   1116 
   1117 				bytecnt =
   1118 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
   1119 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
   1120 				maxb =
   1121 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
   1122 				    PCI_PCIX_STATUS_MAXB_SHIFT;
   1123 				if (bytecnt > maxb) {
   1124 					aprint_verbose("%s: resetting PCI-X "
   1125 					    "MMRBC: %d -> %d\n",
   1126 					    sc->sc_dev.dv_xname,
   1127 					    512 << bytecnt, 512 << maxb);
   1128 					pcix_cmd = (pcix_cmd &
   1129 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
   1130 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
   1131 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1132 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
   1133 					    pcix_cmd);
   1134 				}
   1135 			}
   1136 		}
   1137 		/*
   1138 		 * The quad port adapter is special; it has a PCIX-PCIX
   1139 		 * bridge on the board, and can run the secondary bus at
   1140 		 * a higher speed.
   1141 		 */
   1142 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1143 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1144 								      : 66;
   1145 		} else if (sc->sc_flags & WM_F_PCIX) {
   1146 			switch (reg & STATUS_PCIXSPD_MASK) {
   1147 			case STATUS_PCIXSPD_50_66:
   1148 				sc->sc_bus_speed = 66;
   1149 				break;
   1150 			case STATUS_PCIXSPD_66_100:
   1151 				sc->sc_bus_speed = 100;
   1152 				break;
   1153 			case STATUS_PCIXSPD_100_133:
   1154 				sc->sc_bus_speed = 133;
   1155 				break;
   1156 			default:
   1157 				aprint_error(
   1158 				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
   1159 				    sc->sc_dev.dv_xname,
   1160 				    reg & STATUS_PCIXSPD_MASK);
   1161 				sc->sc_bus_speed = 66;
   1162 			}
   1163 		} else
   1164 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1165 		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
   1166 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1167 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1168 	}
   1169 
   1170 	/*
   1171 	 * Allocate the control data structures, and create and load the
   1172 	 * DMA map for it.
   1173 	 *
   1174 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   1175 	 * memory.  So must Rx descriptors.  We simplify by allocating
   1176 	 * both sets within the same 4G segment.
   1177 	 */
   1178 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
   1179 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
   1180 	cdata_size = sc->sc_type < WM_T_82544 ?
   1181 	    sizeof(struct wm_control_data_82542) :
   1182 	    sizeof(struct wm_control_data_82544);
   1183 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
   1184 				      (bus_size_t) 0x100000000ULL,
   1185 				      &seg, 1, &rseg, 0)) != 0) {
   1186 		aprint_error(
   1187 		    "%s: unable to allocate control data, error = %d\n",
   1188 		    sc->sc_dev.dv_xname, error);
   1189 		goto fail_0;
   1190 	}
   1191 
   1192 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
   1193 				    (void **)&sc->sc_control_data,
   1194 				    BUS_DMA_COHERENT)) != 0) {
   1195 		aprint_error("%s: unable to map control data, error = %d\n",
   1196 		    sc->sc_dev.dv_xname, error);
   1197 		goto fail_1;
   1198 	}
   1199 
   1200 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
   1201 				       0, 0, &sc->sc_cddmamap)) != 0) {
   1202 		aprint_error("%s: unable to create control data DMA map, "
   1203 		    "error = %d\n", sc->sc_dev.dv_xname, error);
   1204 		goto fail_2;
   1205 	}
   1206 
   1207 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
   1208 				     sc->sc_control_data, cdata_size, NULL,
   1209 				     0)) != 0) {
   1210 		aprint_error(
   1211 		    "%s: unable to load control data DMA map, error = %d\n",
   1212 		    sc->sc_dev.dv_xname, error);
   1213 		goto fail_3;
   1214 	}
   1215 
   1216 
   1217 	/*
   1218 	 * Create the transmit buffer DMA maps.
   1219 	 */
   1220 	WM_TXQUEUELEN(sc) =
   1221 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   1222 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   1223 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1224 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   1225 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   1226 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
   1227 			aprint_error("%s: unable to create Tx DMA map %d, "
   1228 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
   1229 			goto fail_4;
   1230 		}
   1231 	}
   1232 
   1233 	/*
   1234 	 * Create the receive buffer DMA maps.
   1235 	 */
   1236 	for (i = 0; i < WM_NRXDESC; i++) {
   1237 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   1238 					       MCLBYTES, 0, 0,
   1239 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
   1240 			aprint_error("%s: unable to create Rx DMA map %d, "
   1241 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
   1242 			goto fail_5;
   1243 		}
   1244 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
   1245 	}
   1246 
   1247 	/* clear interesting stat counters */
   1248 	CSR_READ(sc, WMREG_COLC);
   1249 	CSR_READ(sc, WMREG_RXERRC);
   1250 
   1251 	/*
   1252 	 * Reset the chip to a known state.
   1253 	 */
   1254 	wm_reset(sc);
   1255 
   1256 	/*
   1257 	 * Get some information about the EEPROM.
   1258 	 */
   1259 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
   1260 		uint32_t flash_size;
   1261 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
   1262 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1263 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1264 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
   1265 			printf("%s: can't map FLASH registers\n",
   1266 			    sc->sc_dev.dv_xname);
   1267 			return;
   1268 		}
   1269 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1270 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
   1271 						ICH_FLASH_SECTOR_SIZE;
   1272 		sc->sc_ich8_flash_bank_size =
   1273 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1274 		sc->sc_ich8_flash_bank_size -=
   1275 			(flash_size & ICH_GFPREG_BASE_MASK);
   1276 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1277 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1278 	} else if (sc->sc_type == WM_T_80003)
   1279 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR |  WM_F_SWFW_SYNC;
   1280 	else if (sc->sc_type == WM_T_82573)
   1281 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1282 	else if (sc->sc_type > WM_T_82544)
   1283 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
   1284 
   1285 	if (sc->sc_type <= WM_T_82544)
   1286 		sc->sc_ee_addrbits = 6;
   1287 	else if (sc->sc_type <= WM_T_82546_3) {
   1288 		reg = CSR_READ(sc, WMREG_EECD);
   1289 		if (reg & EECD_EE_SIZE)
   1290 			sc->sc_ee_addrbits = 8;
   1291 		else
   1292 			sc->sc_ee_addrbits = 6;
   1293 	} else if (sc->sc_type <= WM_T_82547_2) {
   1294 		reg = CSR_READ(sc, WMREG_EECD);
   1295 		if (reg & EECD_EE_TYPE) {
   1296 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1297 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1298 		} else
   1299 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
   1300 	} else if ((sc->sc_type == WM_T_82573) &&
   1301 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
   1302 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   1303 	} else {
   1304 		/* Assume everything else is SPI. */
   1305 		reg = CSR_READ(sc, WMREG_EECD);
   1306 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1307 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   1308 	}
   1309 
   1310 	/*
   1311 	 * Defer printing the EEPROM type until after verifying the checksum
   1312 	 * This allows the EEPROM type to be printed correctly in the case
   1313 	 * that no EEPROM is attached.
   1314 	 */
   1315 
   1316 
   1317 	/*
   1318 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
   1319 	 * later, so we can fail future reads from the EEPROM.
   1320 	 */
   1321 	if (wm_validate_eeprom_checksum(sc))
   1322 		sc->sc_flags |= WM_F_EEPROM_INVALID;
   1323 
   1324 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1325 		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
   1326 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   1327 		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
   1328 	} else {
   1329 		if (sc->sc_flags & WM_F_EEPROM_SPI)
   1330 			eetype = "SPI";
   1331 		else
   1332 			eetype = "MicroWire";
   1333 		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
   1334 		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
   1335 		    sc->sc_ee_addrbits, eetype);
   1336 	}
   1337 
   1338 	/*
   1339 	 * Read the Ethernet address from the EEPROM, if not first found
   1340 	 * in device properties.
   1341 	 */
   1342 	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
   1343 	if (ea != NULL) {
   1344 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   1345 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   1346 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   1347 	} else {
   1348 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
   1349 		    sizeof(myea) / sizeof(myea[0]), myea)) {
   1350 			aprint_error("%s: unable to read Ethernet address\n",
   1351 			    sc->sc_dev.dv_xname);
   1352 			return;
   1353 		}
   1354 		enaddr[0] = myea[0] & 0xff;
   1355 		enaddr[1] = myea[0] >> 8;
   1356 		enaddr[2] = myea[1] & 0xff;
   1357 		enaddr[3] = myea[1] >> 8;
   1358 		enaddr[4] = myea[2] & 0xff;
   1359 		enaddr[5] = myea[2] >> 8;
   1360 	}
   1361 
   1362 	/*
   1363 	 * Toggle the LSB of the MAC address on the second port
   1364 	 * of the dual port controller.
   1365 	 */
   1366 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
   1367 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
   1368 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
   1369 			enaddr[5] ^= 1;
   1370 	}
   1371 
   1372 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
   1373 	    ether_sprintf(enaddr));
   1374 
   1375 	/*
   1376 	 * Read the config info from the EEPROM, and set up various
   1377 	 * bits in the control registers based on their contents.
   1378 	 */
   1379 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1380 				 "i82543-cfg1");
   1381 	if (pn != NULL) {
   1382 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1383 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   1384 	} else {
   1385 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1386 			aprint_error("%s: unable to read CFG1\n",
   1387 			    sc->sc_dev.dv_xname);
   1388 			return;
   1389 		}
   1390 	}
   1391 
   1392 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1393 				 "i82543-cfg2");
   1394 	if (pn != NULL) {
   1395 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1396 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   1397 	} else {
   1398 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1399 			aprint_error("%s: unable to read CFG2\n",
   1400 			    sc->sc_dev.dv_xname);
   1401 			return;
   1402 		}
   1403 	}
   1404 
   1405 	if (sc->sc_type >= WM_T_82544) {
   1406 		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
   1407 					 "i82543-swdpin");
   1408 		if (pn != NULL) {
   1409 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   1410 			swdpin = (uint16_t) prop_number_integer_value(pn);
   1411 		} else {
   1412 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1413 				aprint_error("%s: unable to read SWDPIN\n",
   1414 				    sc->sc_dev.dv_xname);
   1415 				return;
   1416 			}
   1417 		}
   1418 	}
   1419 
   1420 	if (cfg1 & EEPROM_CFG1_ILOS)
   1421 		sc->sc_ctrl |= CTRL_ILOS;
   1422 	if (sc->sc_type >= WM_T_82544) {
   1423 		sc->sc_ctrl |=
   1424 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1425 		    CTRL_SWDPIO_SHIFT;
   1426 		sc->sc_ctrl |=
   1427 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1428 		    CTRL_SWDPINS_SHIFT;
   1429 	} else {
   1430 		sc->sc_ctrl |=
   1431 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1432 		    CTRL_SWDPIO_SHIFT;
   1433 	}
   1434 
   1435 #if 0
   1436 	if (sc->sc_type >= WM_T_82544) {
   1437 		if (cfg1 & EEPROM_CFG1_IPS0)
   1438 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1439 		if (cfg1 & EEPROM_CFG1_IPS1)
   1440 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1441 		sc->sc_ctrl_ext |=
   1442 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1443 		    CTRL_EXT_SWDPIO_SHIFT;
   1444 		sc->sc_ctrl_ext |=
   1445 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1446 		    CTRL_EXT_SWDPINS_SHIFT;
   1447 	} else {
   1448 		sc->sc_ctrl_ext |=
   1449 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1450 		    CTRL_EXT_SWDPIO_SHIFT;
   1451 	}
   1452 #endif
   1453 
   1454 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1455 #if 0
   1456 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1457 #endif
   1458 
   1459 	/*
   1460 	 * Set up some register offsets that are different between
   1461 	 * the i82542 and the i82543 and later chips.
   1462 	 */
   1463 	if (sc->sc_type < WM_T_82543) {
   1464 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1465 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1466 	} else {
   1467 		sc->sc_rdt_reg = WMREG_RDT;
   1468 		sc->sc_tdt_reg = WMREG_TDT;
   1469 	}
   1470 
   1471 	/*
   1472 	 * Determine if we're TBI or GMII mode, and initialize the
   1473 	 * media structures accordingly.
   1474 	 */
   1475 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   1476 	    || sc->sc_type == WM_T_82573) {
   1477 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   1478 		wm_gmii_mediainit(sc);
   1479 	} else if (sc->sc_type < WM_T_82543 ||
   1480 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1481 		if (wmp->wmp_flags & WMP_F_1000T)
   1482 			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
   1483 			    "product!\n", sc->sc_dev.dv_xname);
   1484 		wm_tbi_mediainit(sc);
   1485 	} else {
   1486 		if (wmp->wmp_flags & WMP_F_1000X)
   1487 			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
   1488 			    "product!\n", sc->sc_dev.dv_xname);
   1489 		wm_gmii_mediainit(sc);
   1490 	}
   1491 
   1492 	ifp = &sc->sc_ethercom.ec_if;
   1493 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
   1494 	ifp->if_softc = sc;
   1495 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1496 	ifp->if_ioctl = wm_ioctl;
   1497 	ifp->if_start = wm_start;
   1498 	ifp->if_watchdog = wm_watchdog;
   1499 	ifp->if_init = wm_init;
   1500 	ifp->if_stop = wm_stop;
   1501 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   1502 	IFQ_SET_READY(&ifp->if_snd);
   1503 
   1504 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
   1505 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1506 
   1507 	/*
   1508 	 * If we're a i82543 or greater, we can support VLANs.
   1509 	 */
   1510 	if (sc->sc_type >= WM_T_82543)
   1511 		sc->sc_ethercom.ec_capabilities |=
   1512 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
   1513 
   1514 	/*
   1515 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   1516 	 * on i82543 and later.
   1517 	 */
   1518 	if (sc->sc_type >= WM_T_82543) {
   1519 		ifp->if_capabilities |=
   1520 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   1521 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   1522 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   1523 		    IFCAP_CSUM_TCPv6_Tx |
   1524 		    IFCAP_CSUM_UDPv6_Tx;
   1525 	}
   1526 
   1527 	/*
   1528 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   1529 	 *
   1530 	 *	82541GI (8086:1076) ... no
   1531 	 *	82572EI (8086:10b9) ... yes
   1532 	 */
   1533 	if (sc->sc_type >= WM_T_82571) {
   1534 		ifp->if_capabilities |=
   1535 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   1536 	}
   1537 
   1538 	/*
   1539 	 * If we're a i82544 or greater (except i82547), we can do
   1540 	 * TCP segmentation offload.
   1541 	 */
   1542 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   1543 		ifp->if_capabilities |= IFCAP_TSOv4;
   1544 	}
   1545 
   1546 	if (sc->sc_type >= WM_T_82571) {
   1547 		ifp->if_capabilities |= IFCAP_TSOv6;
   1548 	}
   1549 
   1550 	/*
   1551 	 * Attach the interface.
   1552 	 */
   1553 	if_attach(ifp);
   1554 	ether_ifattach(ifp, enaddr);
   1555 #if NRND > 0
   1556 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
   1557 	    RND_TYPE_NET, 0);
   1558 #endif
   1559 
   1560 #ifdef WM_EVENT_COUNTERS
   1561 	/* Attach event counters. */
   1562 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   1563 	    NULL, sc->sc_dev.dv_xname, "txsstall");
   1564 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   1565 	    NULL, sc->sc_dev.dv_xname, "txdstall");
   1566 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   1567 	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
   1568 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   1569 	    NULL, sc->sc_dev.dv_xname, "txdw");
   1570 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   1571 	    NULL, sc->sc_dev.dv_xname, "txqe");
   1572 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   1573 	    NULL, sc->sc_dev.dv_xname, "rxintr");
   1574 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   1575 	    NULL, sc->sc_dev.dv_xname, "linkintr");
   1576 
   1577 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   1578 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
   1579 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   1580 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
   1581 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   1582 	    NULL, sc->sc_dev.dv_xname, "txipsum");
   1583 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   1584 	    NULL, sc->sc_dev.dv_xname, "txtusum");
   1585 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   1586 	    NULL, sc->sc_dev.dv_xname, "txtusum6");
   1587 
   1588 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   1589 	    NULL, sc->sc_dev.dv_xname, "txtso");
   1590 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   1591 	    NULL, sc->sc_dev.dv_xname, "txtso6");
   1592 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   1593 	    NULL, sc->sc_dev.dv_xname, "txtsopain");
   1594 
   1595 	for (i = 0; i < WM_NTXSEGS; i++) {
   1596 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
   1597 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   1598 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
   1599 	}
   1600 
   1601 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   1602 	    NULL, sc->sc_dev.dv_xname, "txdrop");
   1603 
   1604 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   1605 	    NULL, sc->sc_dev.dv_xname, "tu");
   1606 
   1607 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   1608 	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
   1609 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   1610 	    NULL, sc->sc_dev.dv_xname, "tx_xon");
   1611 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   1612 	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
   1613 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   1614 	    NULL, sc->sc_dev.dv_xname, "rx_xon");
   1615 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   1616 	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
   1617 #endif /* WM_EVENT_COUNTERS */
   1618 
   1619 	if (!pmf_device_register(self, NULL, NULL))
   1620 		aprint_error_dev(self, "couldn't establish power handler\n");
   1621 	else
   1622 		pmf_class_network_register(self, ifp);
   1623 
   1624 	return;
   1625 
   1626 	/*
   1627 	 * Free any resources we've allocated during the failed attach
   1628 	 * attempt.  Do this in reverse order and fall through.
   1629 	 */
   1630  fail_5:
   1631 	for (i = 0; i < WM_NRXDESC; i++) {
   1632 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   1633 			bus_dmamap_destroy(sc->sc_dmat,
   1634 			    sc->sc_rxsoft[i].rxs_dmamap);
   1635 	}
   1636  fail_4:
   1637 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   1638 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   1639 			bus_dmamap_destroy(sc->sc_dmat,
   1640 			    sc->sc_txsoft[i].txs_dmamap);
   1641 	}
   1642 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   1643  fail_3:
   1644 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   1645  fail_2:
   1646 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
   1647 	    cdata_size);
   1648  fail_1:
   1649 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
   1650  fail_0:
   1651 	return;
   1652 }
   1653 
   1654 /*
   1655  * wm_tx_offload:
   1656  *
   1657  *	Set up TCP/IP checksumming parameters for the
   1658  *	specified packet.
   1659  */
   1660 static int
   1661 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   1662     uint8_t *fieldsp)
   1663 {
   1664 	struct mbuf *m0 = txs->txs_mbuf;
   1665 	struct livengood_tcpip_ctxdesc *t;
   1666 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   1667 	uint32_t ipcse;
   1668 	struct ether_header *eh;
   1669 	int offset, iphl;
   1670 	uint8_t fields;
   1671 
   1672 	/*
   1673 	 * XXX It would be nice if the mbuf pkthdr had offset
   1674 	 * fields for the protocol headers.
   1675 	 */
   1676 
   1677 	eh = mtod(m0, struct ether_header *);
   1678 	switch (htons(eh->ether_type)) {
   1679 	case ETHERTYPE_IP:
   1680 	case ETHERTYPE_IPV6:
   1681 		offset = ETHER_HDR_LEN;
   1682 		break;
   1683 
   1684 	case ETHERTYPE_VLAN:
   1685 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1686 		break;
   1687 
   1688 	default:
   1689 		/*
   1690 		 * Don't support this protocol or encapsulation.
   1691 		 */
   1692 		*fieldsp = 0;
   1693 		*cmdp = 0;
   1694 		return (0);
   1695 	}
   1696 
   1697 	if ((m0->m_pkthdr.csum_flags &
   1698 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   1699 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   1700 	} else {
   1701 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   1702 	}
   1703 	ipcse = offset + iphl - 1;
   1704 
   1705 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   1706 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   1707 	seg = 0;
   1708 	fields = 0;
   1709 
   1710 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   1711 		int hlen = offset + iphl;
   1712 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   1713 
   1714 		if (__predict_false(m0->m_len <
   1715 				    (hlen + sizeof(struct tcphdr)))) {
   1716 			/*
   1717 			 * TCP/IP headers are not in the first mbuf; we need
   1718 			 * to do this the slow and painful way.  Let's just
   1719 			 * hope this doesn't happen very often.
   1720 			 */
   1721 			struct tcphdr th;
   1722 
   1723 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   1724 
   1725 			m_copydata(m0, hlen, sizeof(th), &th);
   1726 			if (v4) {
   1727 				struct ip ip;
   1728 
   1729 				m_copydata(m0, offset, sizeof(ip), &ip);
   1730 				ip.ip_len = 0;
   1731 				m_copyback(m0,
   1732 				    offset + offsetof(struct ip, ip_len),
   1733 				    sizeof(ip.ip_len), &ip.ip_len);
   1734 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   1735 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   1736 			} else {
   1737 				struct ip6_hdr ip6;
   1738 
   1739 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   1740 				ip6.ip6_plen = 0;
   1741 				m_copyback(m0,
   1742 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   1743 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   1744 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   1745 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   1746 			}
   1747 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   1748 			    sizeof(th.th_sum), &th.th_sum);
   1749 
   1750 			hlen += th.th_off << 2;
   1751 		} else {
   1752 			/*
   1753 			 * TCP/IP headers are in the first mbuf; we can do
   1754 			 * this the easy way.
   1755 			 */
   1756 			struct tcphdr *th;
   1757 
   1758 			if (v4) {
   1759 				struct ip *ip =
   1760 				    (void *)(mtod(m0, char *) + offset);
   1761 				th = (void *)(mtod(m0, char *) + hlen);
   1762 
   1763 				ip->ip_len = 0;
   1764 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   1765 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   1766 			} else {
   1767 				struct ip6_hdr *ip6 =
   1768 				    (void *)(mtod(m0, char *) + offset);
   1769 				th = (void *)(mtod(m0, char *) + hlen);
   1770 
   1771 				ip6->ip6_plen = 0;
   1772 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   1773 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   1774 			}
   1775 			hlen += th->th_off << 2;
   1776 		}
   1777 
   1778 		if (v4) {
   1779 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   1780 			cmdlen |= WTX_TCPIP_CMD_IP;
   1781 		} else {
   1782 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   1783 			ipcse = 0;
   1784 		}
   1785 		cmd |= WTX_TCPIP_CMD_TSE;
   1786 		cmdlen |= WTX_TCPIP_CMD_TSE |
   1787 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   1788 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   1789 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   1790 	}
   1791 
   1792 	/*
   1793 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   1794 	 * offload feature, if we load the context descriptor, we
   1795 	 * MUST provide valid values for IPCSS and TUCSS fields.
   1796 	 */
   1797 
   1798 	ipcs = WTX_TCPIP_IPCSS(offset) |
   1799 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   1800 	    WTX_TCPIP_IPCSE(ipcse);
   1801 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   1802 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   1803 		fields |= WTX_IXSM;
   1804 	}
   1805 
   1806 	offset += iphl;
   1807 
   1808 	if (m0->m_pkthdr.csum_flags &
   1809 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   1810 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   1811 		fields |= WTX_TXSM;
   1812 		tucs = WTX_TCPIP_TUCSS(offset) |
   1813 		    WTX_TCPIP_TUCSO(offset +
   1814 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   1815 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   1816 	} else if ((m0->m_pkthdr.csum_flags &
   1817 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   1818 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   1819 		fields |= WTX_TXSM;
   1820 		tucs = WTX_TCPIP_TUCSS(offset) |
   1821 		    WTX_TCPIP_TUCSO(offset +
   1822 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   1823 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   1824 	} else {
   1825 		/* Just initialize it to a valid TCP context. */
   1826 		tucs = WTX_TCPIP_TUCSS(offset) |
   1827 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   1828 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   1829 	}
   1830 
   1831 	/* Fill in the context descriptor. */
   1832 	t = (struct livengood_tcpip_ctxdesc *)
   1833 	    &sc->sc_txdescs[sc->sc_txnext];
   1834 	t->tcpip_ipcs = htole32(ipcs);
   1835 	t->tcpip_tucs = htole32(tucs);
   1836 	t->tcpip_cmdlen = htole32(cmdlen);
   1837 	t->tcpip_seg = htole32(seg);
   1838 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   1839 
   1840 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
   1841 	txs->txs_ndesc++;
   1842 
   1843 	*cmdp = cmd;
   1844 	*fieldsp = fields;
   1845 
   1846 	return (0);
   1847 }
   1848 
   1849 static void
   1850 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   1851 {
   1852 	struct mbuf *m;
   1853 	int i;
   1854 
   1855 	log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
   1856 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   1857 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   1858 		    "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
   1859 		    m->m_data, m->m_len, m->m_flags);
   1860 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
   1861 	    i, i == 1 ? "" : "s");
   1862 }
   1863 
   1864 /*
   1865  * wm_82547_txfifo_stall:
   1866  *
   1867  *	Callout used to wait for the 82547 Tx FIFO to drain,
   1868  *	reset the FIFO pointers, and restart packet transmission.
   1869  */
   1870 static void
   1871 wm_82547_txfifo_stall(void *arg)
   1872 {
   1873 	struct wm_softc *sc = arg;
   1874 	int s;
   1875 
   1876 	s = splnet();
   1877 
   1878 	if (sc->sc_txfifo_stall) {
   1879 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
   1880 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   1881 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   1882 			/*
   1883 			 * Packets have drained.  Stop transmitter, reset
   1884 			 * FIFO pointers, restart transmitter, and kick
   1885 			 * the packet queue.
   1886 			 */
   1887 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   1888 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   1889 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
   1890 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
   1891 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
   1892 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
   1893 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   1894 			CSR_WRITE_FLUSH(sc);
   1895 
   1896 			sc->sc_txfifo_head = 0;
   1897 			sc->sc_txfifo_stall = 0;
   1898 			wm_start(&sc->sc_ethercom.ec_if);
   1899 		} else {
   1900 			/*
   1901 			 * Still waiting for packets to drain; try again in
   1902 			 * another tick.
   1903 			 */
   1904 			callout_schedule(&sc->sc_txfifo_ch, 1);
   1905 		}
   1906 	}
   1907 
   1908 	splx(s);
   1909 }
   1910 
   1911 /*
   1912  * wm_82547_txfifo_bugchk:
   1913  *
   1914  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   1915  *	prevent enqueueing a packet that would wrap around the end
   1916  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   1917  *
   1918  *	We do this by checking the amount of space before the end
   1919  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   1920  *	the Tx FIFO, wait for all remaining packets to drain, reset
   1921  *	the internal FIFO pointers to the beginning, and restart
   1922  *	transmission on the interface.
   1923  */
   1924 #define	WM_FIFO_HDR		0x10
   1925 #define	WM_82547_PAD_LEN	0x3e0
   1926 static int
   1927 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   1928 {
   1929 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
   1930 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   1931 
   1932 	/* Just return if already stalled. */
   1933 	if (sc->sc_txfifo_stall)
   1934 		return (1);
   1935 
   1936 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   1937 		/* Stall only occurs in half-duplex mode. */
   1938 		goto send_packet;
   1939 	}
   1940 
   1941 	if (len >= WM_82547_PAD_LEN + space) {
   1942 		sc->sc_txfifo_stall = 1;
   1943 		callout_schedule(&sc->sc_txfifo_ch, 1);
   1944 		return (1);
   1945 	}
   1946 
   1947  send_packet:
   1948 	sc->sc_txfifo_head += len;
   1949 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
   1950 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
   1951 
   1952 	return (0);
   1953 }
   1954 
   1955 /*
   1956  * wm_start:		[ifnet interface function]
   1957  *
   1958  *	Start packet transmission on the interface.
   1959  */
   1960 static void
   1961 wm_start(struct ifnet *ifp)
   1962 {
   1963 	struct wm_softc *sc = ifp->if_softc;
   1964 	struct mbuf *m0;
   1965 #if 0 /* XXXJRT */
   1966 	struct m_tag *mtag;
   1967 #endif
   1968 	struct wm_txsoft *txs;
   1969 	bus_dmamap_t dmamap;
   1970 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   1971 	bus_addr_t curaddr;
   1972 	bus_size_t seglen, curlen;
   1973 	uint32_t cksumcmd;
   1974 	uint8_t cksumfields;
   1975 
   1976 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   1977 		return;
   1978 
   1979 	/*
   1980 	 * Remember the previous number of free descriptors.
   1981 	 */
   1982 	ofree = sc->sc_txfree;
   1983 
   1984 	/*
   1985 	 * Loop through the send queue, setting up transmit descriptors
   1986 	 * until we drain the queue, or use up all available transmit
   1987 	 * descriptors.
   1988 	 */
   1989 	for (;;) {
   1990 		/* Grab a packet off the queue. */
   1991 		IFQ_POLL(&ifp->if_snd, m0);
   1992 		if (m0 == NULL)
   1993 			break;
   1994 
   1995 		DPRINTF(WM_DEBUG_TX,
   1996 		    ("%s: TX: have packet to transmit: %p\n",
   1997 		    sc->sc_dev.dv_xname, m0));
   1998 
   1999 		/* Get a work queue entry. */
   2000 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
   2001 			wm_txintr(sc);
   2002 			if (sc->sc_txsfree == 0) {
   2003 				DPRINTF(WM_DEBUG_TX,
   2004 				    ("%s: TX: no free job descriptors\n",
   2005 					sc->sc_dev.dv_xname));
   2006 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   2007 				break;
   2008 			}
   2009 		}
   2010 
   2011 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   2012 		dmamap = txs->txs_dmamap;
   2013 
   2014 		use_tso = (m0->m_pkthdr.csum_flags &
   2015 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   2016 
   2017 		/*
   2018 		 * So says the Linux driver:
   2019 		 * The controller does a simple calculation to make sure
   2020 		 * there is enough room in the FIFO before initiating the
   2021 		 * DMA for each buffer.  The calc is:
   2022 		 *	4 = ceil(buffer len / MSS)
   2023 		 * To make sure we don't overrun the FIFO, adjust the max
   2024 		 * buffer len if the MSS drops.
   2025 		 */
   2026 		dmamap->dm_maxsegsz =
   2027 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   2028 		    ? m0->m_pkthdr.segsz << 2
   2029 		    : WTX_MAX_LEN;
   2030 
   2031 		/*
   2032 		 * Load the DMA map.  If this fails, the packet either
   2033 		 * didn't fit in the allotted number of segments, or we
   2034 		 * were short on resources.  For the too-many-segments
   2035 		 * case, we simply report an error and drop the packet,
   2036 		 * since we can't sanely copy a jumbo packet to a single
   2037 		 * buffer.
   2038 		 */
   2039 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   2040 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   2041 		if (error) {
   2042 			if (error == EFBIG) {
   2043 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   2044 				log(LOG_ERR, "%s: Tx packet consumes too many "
   2045 				    "DMA segments, dropping...\n",
   2046 				    sc->sc_dev.dv_xname);
   2047 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   2048 				wm_dump_mbuf_chain(sc, m0);
   2049 				m_freem(m0);
   2050 				continue;
   2051 			}
   2052 			/*
   2053 			 * Short on resources, just stop for now.
   2054 			 */
   2055 			DPRINTF(WM_DEBUG_TX,
   2056 			    ("%s: TX: dmamap load failed: %d\n",
   2057 			    sc->sc_dev.dv_xname, error));
   2058 			break;
   2059 		}
   2060 
   2061 		segs_needed = dmamap->dm_nsegs;
   2062 		if (use_tso) {
   2063 			/* For sentinel descriptor; see below. */
   2064 			segs_needed++;
   2065 		}
   2066 
   2067 		/*
   2068 		 * Ensure we have enough descriptors free to describe
   2069 		 * the packet.  Note, we always reserve one descriptor
   2070 		 * at the end of the ring due to the semantics of the
   2071 		 * TDT register, plus one more in the event we need
   2072 		 * to load offload context.
   2073 		 */
   2074 		if (segs_needed > sc->sc_txfree - 2) {
   2075 			/*
   2076 			 * Not enough free descriptors to transmit this
   2077 			 * packet.  We haven't committed anything yet,
   2078 			 * so just unload the DMA map, put the packet
   2079 			 * pack on the queue, and punt.  Notify the upper
   2080 			 * layer that there are no more slots left.
   2081 			 */
   2082 			DPRINTF(WM_DEBUG_TX,
   2083 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   2084 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
   2085 			    sc->sc_txfree - 1));
   2086 			ifp->if_flags |= IFF_OACTIVE;
   2087 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   2088 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   2089 			break;
   2090 		}
   2091 
   2092 		/*
   2093 		 * Check for 82547 Tx FIFO bug.  We need to do this
   2094 		 * once we know we can transmit the packet, since we
   2095 		 * do some internal FIFO space accounting here.
   2096 		 */
   2097 		if (sc->sc_type == WM_T_82547 &&
   2098 		    wm_82547_txfifo_bugchk(sc, m0)) {
   2099 			DPRINTF(WM_DEBUG_TX,
   2100 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   2101 			    sc->sc_dev.dv_xname));
   2102 			ifp->if_flags |= IFF_OACTIVE;
   2103 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   2104 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   2105 			break;
   2106 		}
   2107 
   2108 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   2109 
   2110 		/*
   2111 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   2112 		 */
   2113 
   2114 		DPRINTF(WM_DEBUG_TX,
   2115 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   2116 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
   2117 
   2118 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   2119 
   2120 		/*
   2121 		 * Store a pointer to the packet so that we can free it
   2122 		 * later.
   2123 		 *
   2124 		 * Initially, we consider the number of descriptors the
   2125 		 * packet uses the number of DMA segments.  This may be
   2126 		 * incremented by 1 if we do checksum offload (a descriptor
   2127 		 * is used to set the checksum context).
   2128 		 */
   2129 		txs->txs_mbuf = m0;
   2130 		txs->txs_firstdesc = sc->sc_txnext;
   2131 		txs->txs_ndesc = segs_needed;
   2132 
   2133 		/* Set up offload parameters for this packet. */
   2134 		if (m0->m_pkthdr.csum_flags &
   2135 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   2136 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   2137 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   2138 			if (wm_tx_offload(sc, txs, &cksumcmd,
   2139 					  &cksumfields) != 0) {
   2140 				/* Error message already displayed. */
   2141 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   2142 				continue;
   2143 			}
   2144 		} else {
   2145 			cksumcmd = 0;
   2146 			cksumfields = 0;
   2147 		}
   2148 
   2149 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   2150 
   2151 		/* Sync the DMA map. */
   2152 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   2153 		    BUS_DMASYNC_PREWRITE);
   2154 
   2155 		/*
   2156 		 * Initialize the transmit descriptor.
   2157 		 */
   2158 		for (nexttx = sc->sc_txnext, seg = 0;
   2159 		     seg < dmamap->dm_nsegs; seg++) {
   2160 			for (seglen = dmamap->dm_segs[seg].ds_len,
   2161 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   2162 			     seglen != 0;
   2163 			     curaddr += curlen, seglen -= curlen,
   2164 			     nexttx = WM_NEXTTX(sc, nexttx)) {
   2165 				curlen = seglen;
   2166 
   2167 				/*
   2168 				 * So says the Linux driver:
   2169 				 * Work around for premature descriptor
   2170 				 * write-backs in TSO mode.  Append a
   2171 				 * 4-byte sentinel descriptor.
   2172 				 */
   2173 				if (use_tso &&
   2174 				    seg == dmamap->dm_nsegs - 1 &&
   2175 				    curlen > 8)
   2176 					curlen -= 4;
   2177 
   2178 				wm_set_dma_addr(
   2179 				    &sc->sc_txdescs[nexttx].wtx_addr,
   2180 				    curaddr);
   2181 				sc->sc_txdescs[nexttx].wtx_cmdlen =
   2182 				    htole32(cksumcmd | curlen);
   2183 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
   2184 				    0;
   2185 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   2186 				    cksumfields;
   2187 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   2188 				lasttx = nexttx;
   2189 
   2190 				DPRINTF(WM_DEBUG_TX,
   2191 				    ("%s: TX: desc %d: low 0x%08lx, "
   2192 				     "len 0x%04x\n",
   2193 				    sc->sc_dev.dv_xname, nexttx,
   2194 				    curaddr & 0xffffffffUL, (unsigned)curlen));
   2195 			}
   2196 		}
   2197 
   2198 		KASSERT(lasttx != -1);
   2199 
   2200 		/*
   2201 		 * Set up the command byte on the last descriptor of
   2202 		 * the packet.  If we're in the interrupt delay window,
   2203 		 * delay the interrupt.
   2204 		 */
   2205 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   2206 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   2207 
   2208 #if 0 /* XXXJRT */
   2209 		/*
   2210 		 * If VLANs are enabled and the packet has a VLAN tag, set
   2211 		 * up the descriptor to encapsulate the packet for us.
   2212 		 *
   2213 		 * This is only valid on the last descriptor of the packet.
   2214 		 */
   2215 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   2216 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   2217 			    htole32(WTX_CMD_VLE);
   2218 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   2219 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   2220 		}
   2221 #endif /* XXXJRT */
   2222 
   2223 		txs->txs_lastdesc = lasttx;
   2224 
   2225 		DPRINTF(WM_DEBUG_TX,
   2226 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
   2227 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   2228 
   2229 		/* Sync the descriptors we're using. */
   2230 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
   2231 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2232 
   2233 		/* Give the packet to the chip. */
   2234 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   2235 
   2236 		DPRINTF(WM_DEBUG_TX,
   2237 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
   2238 
   2239 		DPRINTF(WM_DEBUG_TX,
   2240 		    ("%s: TX: finished transmitting packet, job %d\n",
   2241 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
   2242 
   2243 		/* Advance the tx pointer. */
   2244 		sc->sc_txfree -= txs->txs_ndesc;
   2245 		sc->sc_txnext = nexttx;
   2246 
   2247 		sc->sc_txsfree--;
   2248 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
   2249 
   2250 #if NBPFILTER > 0
   2251 		/* Pass the packet to any BPF listeners. */
   2252 		if (ifp->if_bpf)
   2253 			bpf_mtap(ifp->if_bpf, m0);
   2254 #endif /* NBPFILTER > 0 */
   2255 	}
   2256 
   2257 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   2258 		/* No more slots; notify upper layer. */
   2259 		ifp->if_flags |= IFF_OACTIVE;
   2260 	}
   2261 
   2262 	if (sc->sc_txfree != ofree) {
   2263 		/* Set a watchdog timer in case the chip flakes out. */
   2264 		ifp->if_timer = 5;
   2265 	}
   2266 }
   2267 
   2268 /*
   2269  * wm_watchdog:		[ifnet interface function]
   2270  *
   2271  *	Watchdog timer handler.
   2272  */
   2273 static void
   2274 wm_watchdog(struct ifnet *ifp)
   2275 {
   2276 	struct wm_softc *sc = ifp->if_softc;
   2277 
   2278 	/*
   2279 	 * Since we're using delayed interrupts, sweep up
   2280 	 * before we report an error.
   2281 	 */
   2282 	wm_txintr(sc);
   2283 
   2284 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
   2285 		log(LOG_ERR,
   2286 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2287 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   2288 		    sc->sc_txnext);
   2289 		ifp->if_oerrors++;
   2290 
   2291 		/* Reset the interface. */
   2292 		(void) wm_init(ifp);
   2293 	}
   2294 
   2295 	/* Try to get more packets going. */
   2296 	wm_start(ifp);
   2297 }
   2298 
   2299 /*
   2300  * wm_ioctl:		[ifnet interface function]
   2301  *
   2302  *	Handle control requests from the operator.
   2303  */
   2304 static int
   2305 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2306 {
   2307 	struct wm_softc *sc = ifp->if_softc;
   2308 	struct ifreq *ifr = (struct ifreq *) data;
   2309 	int s, error;
   2310 
   2311 	s = splnet();
   2312 
   2313 	switch (cmd) {
   2314 	case SIOCSIFMEDIA:
   2315 	case SIOCGIFMEDIA:
   2316 		/* Flow control requires full-duplex mode. */
   2317 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2318 		    (ifr->ifr_media & IFM_FDX) == 0)
   2319 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2320 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2321 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2322 				/* We can do both TXPAUSE and RXPAUSE. */
   2323 				ifr->ifr_media |=
   2324 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2325 			}
   2326 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2327 		}
   2328 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2329 		break;
   2330 	default:
   2331 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
   2332 			break;
   2333 
   2334 		error = 0;
   2335 
   2336 		if (cmd == SIOCSIFCAP)
   2337 			error = (*ifp->if_init)(ifp);
   2338 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2339 			;
   2340 		else if (ifp->if_flags & IFF_RUNNING) {
   2341 			/*
   2342 			 * Multicast list has changed; set the hardware filter
   2343 			 * accordingly.
   2344 			 */
   2345 			wm_set_filter(sc);
   2346 		}
   2347 		break;
   2348 	}
   2349 
   2350 	/* Try to get more packets going. */
   2351 	wm_start(ifp);
   2352 
   2353 	splx(s);
   2354 	return (error);
   2355 }
   2356 
   2357 /*
   2358  * wm_intr:
   2359  *
   2360  *	Interrupt service routine.
   2361  */
   2362 static int
   2363 wm_intr(void *arg)
   2364 {
   2365 	struct wm_softc *sc = arg;
   2366 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2367 	uint32_t icr;
   2368 	int handled = 0;
   2369 
   2370 	while (1 /* CONSTCOND */) {
   2371 		icr = CSR_READ(sc, WMREG_ICR);
   2372 		if ((icr & sc->sc_icr) == 0)
   2373 			break;
   2374 #if 0 /*NRND > 0*/
   2375 		if (RND_ENABLED(&sc->rnd_source))
   2376 			rnd_add_uint32(&sc->rnd_source, icr);
   2377 #endif
   2378 
   2379 		handled = 1;
   2380 
   2381 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   2382 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   2383 			DPRINTF(WM_DEBUG_RX,
   2384 			    ("%s: RX: got Rx intr 0x%08x\n",
   2385 			    sc->sc_dev.dv_xname,
   2386 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   2387 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   2388 		}
   2389 #endif
   2390 		wm_rxintr(sc);
   2391 
   2392 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   2393 		if (icr & ICR_TXDW) {
   2394 			DPRINTF(WM_DEBUG_TX,
   2395 			    ("%s: TX: got TXDW interrupt\n",
   2396 			    sc->sc_dev.dv_xname));
   2397 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   2398 		}
   2399 #endif
   2400 		wm_txintr(sc);
   2401 
   2402 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   2403 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   2404 			wm_linkintr(sc, icr);
   2405 		}
   2406 
   2407 		if (icr & ICR_RXO) {
   2408 			ifp->if_ierrors++;
   2409 #if defined(WM_DEBUG)
   2410 			log(LOG_WARNING, "%s: Receive overrun\n",
   2411 			    sc->sc_dev.dv_xname);
   2412 #endif /* defined(WM_DEBUG) */
   2413 		}
   2414 	}
   2415 
   2416 	if (handled) {
   2417 		/* Try to get more packets going. */
   2418 		wm_start(ifp);
   2419 	}
   2420 
   2421 	return (handled);
   2422 }
   2423 
   2424 /*
   2425  * wm_txintr:
   2426  *
   2427  *	Helper; handle transmit interrupts.
   2428  */
   2429 static void
   2430 wm_txintr(struct wm_softc *sc)
   2431 {
   2432 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2433 	struct wm_txsoft *txs;
   2434 	uint8_t status;
   2435 	int i;
   2436 
   2437 	ifp->if_flags &= ~IFF_OACTIVE;
   2438 
   2439 	/*
   2440 	 * Go through the Tx list and free mbufs for those
   2441 	 * frames which have been transmitted.
   2442 	 */
   2443 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
   2444 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
   2445 		txs = &sc->sc_txsoft[i];
   2446 
   2447 		DPRINTF(WM_DEBUG_TX,
   2448 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   2449 
   2450 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   2451 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   2452 
   2453 		status =
   2454 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   2455 		if ((status & WTX_ST_DD) == 0) {
   2456 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   2457 			    BUS_DMASYNC_PREREAD);
   2458 			break;
   2459 		}
   2460 
   2461 		DPRINTF(WM_DEBUG_TX,
   2462 		    ("%s: TX: job %d done: descs %d..%d\n",
   2463 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   2464 		    txs->txs_lastdesc));
   2465 
   2466 		/*
   2467 		 * XXX We should probably be using the statistics
   2468 		 * XXX registers, but I don't know if they exist
   2469 		 * XXX on chips before the i82544.
   2470 		 */
   2471 
   2472 #ifdef WM_EVENT_COUNTERS
   2473 		if (status & WTX_ST_TU)
   2474 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   2475 #endif /* WM_EVENT_COUNTERS */
   2476 
   2477 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   2478 			ifp->if_oerrors++;
   2479 			if (status & WTX_ST_LC)
   2480 				log(LOG_WARNING, "%s: late collision\n",
   2481 				    sc->sc_dev.dv_xname);
   2482 			else if (status & WTX_ST_EC) {
   2483 				ifp->if_collisions += 16;
   2484 				log(LOG_WARNING, "%s: excessive collisions\n",
   2485 				    sc->sc_dev.dv_xname);
   2486 			}
   2487 		} else
   2488 			ifp->if_opackets++;
   2489 
   2490 		sc->sc_txfree += txs->txs_ndesc;
   2491 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   2492 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   2493 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2494 		m_freem(txs->txs_mbuf);
   2495 		txs->txs_mbuf = NULL;
   2496 	}
   2497 
   2498 	/* Update the dirty transmit buffer pointer. */
   2499 	sc->sc_txsdirty = i;
   2500 	DPRINTF(WM_DEBUG_TX,
   2501 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   2502 
   2503 	/*
   2504 	 * If there are no more pending transmissions, cancel the watchdog
   2505 	 * timer.
   2506 	 */
   2507 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
   2508 		ifp->if_timer = 0;
   2509 }
   2510 
   2511 /*
   2512  * wm_rxintr:
   2513  *
   2514  *	Helper; handle receive interrupts.
   2515  */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;		/* software state of current slot */
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the receive ring starting at the last slot we processed,
	 * stopping at the first descriptor the hardware has not yet
	 * completed (DD bit clear).
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		/* Sync the descriptor so we see the device's writeback. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * If we are discarding the remainder of a packet (because
		 * an earlier buffer allocation failed), just recycle this
		 * descriptor until EOP clears the discard state.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		/* Make the received data visible to the CPU. */
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero. Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the chain being assembled. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 */
		m->m_len -= ETHER_CRC_LEN;

		/* Terminate the chain and take the whole packet. */
		*sc->sc_rxtailp = NULL;
		len = m->m_len + sc->sc_rxlen;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		/* XXX note: unbalanced parentheses below; fix before enabling */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(sc->sc_rxdescs[i].wrx_special,
			    continue);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   2711 
   2712 /*
   2713  * wm_linkintr:
   2714  *
   2715  *	Helper; handle link interrupts.
   2716  */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * TBI (fiber/serdes) path from here down.
	 *
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/*
			 * Link came up: reprogram the collision distance
			 * for the resolved duplex, and XON frame
			 * transmission according to the chip's current
			 * flow-control setting.
			 */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips use the old FCRTL location. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   2784 
   2785 /*
   2786  * wm_tick:
   2787  *
   2788  *	One second timer, used to check link status, sweep up
   2789  *	completed transmit jobs, etc.
   2790  */
   2791 static void
   2792 wm_tick(void *arg)
   2793 {
   2794 	struct wm_softc *sc = arg;
   2795 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2796 	int s;
   2797 
   2798 	s = splnet();
   2799 
   2800 	if (sc->sc_type >= WM_T_82542_2_1) {
   2801 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2802 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2803 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2804 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2805 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2806 	}
   2807 
   2808 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2809 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   2810 
   2811 
   2812 	if (sc->sc_flags & WM_F_HAS_MII)
   2813 		mii_tick(&sc->sc_mii);
   2814 	else
   2815 		wm_tbi_check_link(sc);
   2816 
   2817 	splx(s);
   2818 
   2819 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2820 }
   2821 
   2822 /*
   2823  * wm_reset:
   2824  *
   2825  *	Reset the i82542 chip.
   2826  */
   2827 static void
   2828 wm_reset(struct wm_softc *sc)
   2829 {
   2830 	uint32_t reg;
   2831 
   2832 	/*
   2833 	 * Allocate on-chip memory according to the MTU size.
   2834 	 * The Packet Buffer Allocation register must be written
   2835 	 * before the chip is reset.
   2836 	 */
   2837 	switch (sc->sc_type) {
   2838 	case WM_T_82547:
   2839 	case WM_T_82547_2:
   2840 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   2841 		    PBA_22K : PBA_30K;
   2842 		sc->sc_txfifo_head = 0;
   2843 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   2844 		sc->sc_txfifo_size =
   2845 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   2846 		sc->sc_txfifo_stall = 0;
   2847 		break;
   2848 	case WM_T_82571:
   2849 	case WM_T_82572:
   2850 	case WM_T_80003:
   2851 		sc->sc_pba = PBA_32K;
   2852 		break;
   2853 	case WM_T_82573:
   2854 		sc->sc_pba = PBA_12K;
   2855 		break;
   2856 	case WM_T_ICH8:
   2857 		sc->sc_pba = PBA_8K;
   2858 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   2859 		break;
   2860 	case WM_T_ICH9:
   2861 		sc->sc_pba = PBA_10K;
   2862 		break;
   2863 	default:
   2864 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   2865 		    PBA_40K : PBA_48K;
   2866 		break;
   2867 	}
   2868 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   2869 
   2870 	if (sc->sc_flags & WM_F_PCIE) {
   2871 		int timeout = 800;
   2872 
   2873 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   2874 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2875 
   2876 		while (timeout) {
   2877 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
   2878 				break;
   2879 			delay(100);
   2880 		}
   2881 	}
   2882 
   2883 	/* clear interrupt */
   2884 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   2885 
   2886 	/*
   2887 	 * 82541 Errata 29? & 82547 Errata 28?
   2888 	 * See also the description about PHY_RST bit in CTRL register
   2889 	 * in 8254x_GBe_SDM.pdf.
   2890 	 */
   2891 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   2892 		CSR_WRITE(sc, WMREG_CTRL,
   2893 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   2894 		delay(5000);
   2895 	}
   2896 
   2897 	switch (sc->sc_type) {
   2898 	case WM_T_82544:
   2899 	case WM_T_82540:
   2900 	case WM_T_82545:
   2901 	case WM_T_82546:
   2902 	case WM_T_82541:
   2903 	case WM_T_82541_2:
   2904 		/*
   2905 		 * On some chipsets, a reset through a memory-mapped write
   2906 		 * cycle can cause the chip to reset before completing the
   2907 		 * write cycle.  This causes major headache that can be
   2908 		 * avoided by issuing the reset via indirect register writes
   2909 		 * through I/O space.
   2910 		 *
   2911 		 * So, if we successfully mapped the I/O BAR at attach time,
   2912 		 * use that.  Otherwise, try our luck with a memory-mapped
   2913 		 * reset.
   2914 		 */
   2915 		if (sc->sc_flags & WM_F_IOH_VALID)
   2916 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   2917 		else
   2918 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2919 		break;
   2920 
   2921 	case WM_T_82545_3:
   2922 	case WM_T_82546_3:
   2923 		/* Use the shadow control register on these chips. */
   2924 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   2925 		break;
   2926 
   2927 	case WM_T_ICH8:
   2928 	case WM_T_ICH9:
   2929 		wm_get_swfwhw_semaphore(sc);
   2930 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
   2931 		delay(10000);
   2932 
   2933 	default:
   2934 		/* Everything else can safely use the documented method. */
   2935 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2936 		break;
   2937 	}
   2938 	delay(10000);
   2939 
   2940 	/* reload EEPROM */
   2941 	switch(sc->sc_type) {
   2942 	case WM_T_82542_2_0:
   2943 	case WM_T_82542_2_1:
   2944 	case WM_T_82543:
   2945 	case WM_T_82544:
   2946 		delay(10);
   2947 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   2948 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2949 		delay(2000);
   2950 		break;
   2951 	case WM_T_82541:
   2952 	case WM_T_82541_2:
   2953 	case WM_T_82547:
   2954 	case WM_T_82547_2:
   2955 		delay(20000);
   2956 		break;
   2957 	case WM_T_82573:
   2958 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   2959 			delay(10);
   2960 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   2961 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2962 		}
   2963 		/* FALLTHROUGH */
   2964 	default:
   2965 		/* check EECD_EE_AUTORD */
   2966 		wm_get_auto_rd_done(sc);
   2967 	}
   2968 
   2969 #if 0
   2970 	for (i = 0; i < 1000; i++) {
   2971 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
   2972 			return;
   2973 		}
   2974 		delay(20);
   2975 	}
   2976 
   2977 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
   2978 		log(LOG_ERR, "%s: reset failed to complete\n",
   2979 		    sc->sc_dev.dv_xname);
   2980 #endif
   2981 }
   2982 
   2983 /*
   2984  * wm_init:		[ifnet interface function]
   2985  *
   2986  *	Initialize the interface.  Must be called at splnet().
   2987  */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Program the Tx ring base/length; pre-82543 chips use the
	 * old register layout. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second Rx queue on old chips is unused. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	/* Populate every Rx slot with an mbuf cluster. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_type != WM_T_ICH8) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}
	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
		int val;
		val = CSR_READ(sc, WMREG_CTRL_EXT);
		val &= ~CTRL_EXT_LINK_MODE_MASK;
		CSR_WRITE(sc, WMREG_CTRL_EXT, val);

		/* Bypass RX and TX FIFO's */
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		/*
		 * Set the mac to wait the maximum time between each
		 * iteration and increase the max iterations when
		 * polling the phy; this fixes erroneous timeouts at 10Mbps.
		 */
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
		val |= 0x3F;
		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* TBI (non-MII) media also needs the /C/ ordered-set interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		 /*
		  * For N interrupts/sec, set this value to:
		  * 1000000000 / (N * 256).  Note that we set the
		  * absolute and packet timer values to this value
		  * divided by 4 to get "simple timer" behavior.
		  */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tctl |= TCTL_RTLC;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* 82573 doesn't support jumbo frame */
	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_rctl |= RCTL_LPE;

	/* Program the Rx buffer size to match MCLBYTES. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    sc->sc_dev.dv_xname);
	return (error);
}
   3304 
   3305 /*
   3306  * wm_rxdrain:
   3307  *
   3308  *	Drain the receive queue.
   3309  */
   3310 static void
   3311 wm_rxdrain(struct wm_softc *sc)
   3312 {
   3313 	struct wm_rxsoft *rxs;
   3314 	int i;
   3315 
   3316 	for (i = 0; i < WM_NRXDESC; i++) {
   3317 		rxs = &sc->sc_rxsoft[i];
   3318 		if (rxs->rxs_mbuf != NULL) {
   3319 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3320 			m_freem(rxs->rxs_mbuf);
   3321 			rxs->rxs_mbuf = NULL;
   3322 		}
   3323 	}
   3324 }
   3325 
   3326 /*
   3327  * wm_stop:		[ifnet interface function]
   3328  *
   3329  *	Stop transmission on the interface.
   3330  */
   3331 static void
   3332 wm_stop(struct ifnet *ifp, int disable)
   3333 {
   3334 	struct wm_softc *sc = ifp->if_softc;
   3335 	struct wm_txsoft *txs;
   3336 	int i;
   3337 
   3338 	/* Stop the one second clock. */
   3339 	callout_stop(&sc->sc_tick_ch);
   3340 
   3341 	/* Stop the 82547 Tx FIFO stall check timer. */
   3342 	if (sc->sc_type == WM_T_82547)
   3343 		callout_stop(&sc->sc_txfifo_ch);
   3344 
   3345 	if (sc->sc_flags & WM_F_HAS_MII) {
   3346 		/* Down the MII. */
   3347 		mii_down(&sc->sc_mii);
   3348 	}
   3349 
   3350 	/* Stop the transmit and receive processes. */
   3351 	CSR_WRITE(sc, WMREG_TCTL, 0);
   3352 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3353 
   3354 	/*
   3355 	 * Clear the interrupt mask to ensure the device cannot assert its
   3356 	 * interrupt line.
   3357 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
   3358 	 * any currently pending or shared interrupt.
   3359 	 */
   3360 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3361 	sc->sc_icr = 0;
   3362 
   3363 	/* Release any queued transmit buffers. */
   3364 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
   3365 		txs = &sc->sc_txsoft[i];
   3366 		if (txs->txs_mbuf != NULL) {
   3367 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   3368 			m_freem(txs->txs_mbuf);
   3369 			txs->txs_mbuf = NULL;
   3370 		}
   3371 	}
   3372 
   3373 	if (disable)
   3374 		wm_rxdrain(sc);
   3375 
   3376 	/* Mark the interface as down and cancel the watchdog timer. */
   3377 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   3378 	ifp->if_timer = 0;
   3379 }
   3380 
   3381 void
   3382 wm_get_auto_rd_done(struct wm_softc *sc)
   3383 {
   3384 	int i;
   3385 
   3386 	/* wait for eeprom to reload */
   3387 	switch (sc->sc_type) {
   3388 	case WM_T_82571:
   3389 	case WM_T_82572:
   3390 	case WM_T_82573:
   3391 	case WM_T_80003:
   3392 	case WM_T_ICH8:
   3393 	case WM_T_ICH9:
   3394 		for (i = 10; i > 0; i--) {
   3395 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3396 				break;
   3397 			delay(1000);
   3398 		}
   3399 		if (i == 0) {
   3400 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3401 			    "complete\n", sc->sc_dev.dv_xname);
   3402 		}
   3403 		break;
   3404 	default:
   3405 		delay(5000);
   3406 		break;
   3407 	}
   3408 
   3409 	/* Phy configuration starts after EECD_AUTO_RD is set */
   3410 	if (sc->sc_type == WM_T_82573)
   3411 		delay(25000);
   3412 }
   3413 
   3414 /*
   3415  * wm_acquire_eeprom:
   3416  *
   3417  *	Perform the EEPROM handshake required on some chips.
   3418  */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	/* Returns 0 when EEPROM access is granted, 1 on failure. */
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	/*
	 * First take whichever software/firmware semaphore this chip
	 * uses to arbitrate EEPROM access; exactly one of the three
	 * schemes applies per chip.
	 */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret)
		return 1;

	/*
	 * Some chips additionally require a request/grant handshake
	 * through EECD before the serial EEPROM may be accessed.
	 */
	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			aprint_error("%s: could not acquire EEPROM GNT\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Withdraw our request and drop the semaphores
			 * (same release order as wm_release_eeprom()).
			 */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return (1);
		}
	}

	return (0);
}
   3473 
   3474 /*
   3475  * wm_release_eeprom:
   3476  *
   3477  *	Release the EEPROM mutex.
   3478  */
   3479 static void
   3480 wm_release_eeprom(struct wm_softc *sc)
   3481 {
   3482 	uint32_t reg;
   3483 
   3484 	/* always success */
   3485 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   3486 		return;
   3487 
   3488 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   3489 		reg = CSR_READ(sc, WMREG_EECD);
   3490 		reg &= ~EECD_EE_REQ;
   3491 		CSR_WRITE(sc, WMREG_EECD, reg);
   3492 	}
   3493 
   3494 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
   3495 		wm_put_swfwhw_semaphore(sc);
   3496 	if (sc->sc_flags & WM_F_SWFW_SYNC)
   3497 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   3498 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   3499 		wm_put_swsm_semaphore(sc);
   3500 }
   3501 
   3502 /*
   3503  * wm_eeprom_sendbits:
   3504  *
   3505  *	Send a series of bits to the EEPROM.
   3506  */
   3507 static void
   3508 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   3509 {
   3510 	uint32_t reg;
   3511 	int x;
   3512 
   3513 	reg = CSR_READ(sc, WMREG_EECD);
   3514 
   3515 	for (x = nbits; x > 0; x--) {
   3516 		if (bits & (1U << (x - 1)))
   3517 			reg |= EECD_DI;
   3518 		else
   3519 			reg &= ~EECD_DI;
   3520 		CSR_WRITE(sc, WMREG_EECD, reg);
   3521 		delay(2);
   3522 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   3523 		delay(2);
   3524 		CSR_WRITE(sc, WMREG_EECD, reg);
   3525 		delay(2);
   3526 	}
   3527 }
   3528 
   3529 /*
   3530  * wm_eeprom_recvbits:
   3531  *
   3532  *	Receive a series of bits from the EEPROM.
   3533  */
   3534 static void
   3535 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   3536 {
   3537 	uint32_t reg, val;
   3538 	int x;
   3539 
   3540 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   3541 
   3542 	val = 0;
   3543 	for (x = nbits; x > 0; x--) {
   3544 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   3545 		delay(2);
   3546 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   3547 			val |= (1U << (x - 1));
   3548 		CSR_WRITE(sc, WMREG_EECD, reg);
   3549 		delay(2);
   3550 	}
   3551 	*valp = val;
   3552 }
   3553 
   3554 /*
   3555  * wm_read_eeprom_uwire:
   3556  *
   3557  *	Read a word from the EEPROM using the MicroWire protocol.
   3558  */
   3559 static int
   3560 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   3561 {
   3562 	uint32_t reg, val;
   3563 	int i;
   3564 
   3565 	for (i = 0; i < wordcnt; i++) {
   3566 		/* Clear SK and DI. */
   3567 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   3568 		CSR_WRITE(sc, WMREG_EECD, reg);
   3569 
   3570 		/* Set CHIP SELECT. */
   3571 		reg |= EECD_CS;
   3572 		CSR_WRITE(sc, WMREG_EECD, reg);
   3573 		delay(2);
   3574 
   3575 		/* Shift in the READ command. */
   3576 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   3577 
   3578 		/* Shift in address. */
   3579 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   3580 
   3581 		/* Shift out the data. */
   3582 		wm_eeprom_recvbits(sc, &val, 16);
   3583 		data[i] = val & 0xffff;
   3584 
   3585 		/* Clear CHIP SELECT. */
   3586 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   3587 		CSR_WRITE(sc, WMREG_EECD, reg);
   3588 		delay(2);
   3589 	}
   3590 
   3591 	return (0);
   3592 }
   3593 
   3594 /*
   3595  * wm_spi_eeprom_ready:
   3596  *
   3597  *	Wait for a SPI EEPROM to be ready for commands.
   3598  */
   3599 static int
   3600 wm_spi_eeprom_ready(struct wm_softc *sc)
   3601 {
   3602 	uint32_t val;
   3603 	int usec;
   3604 
   3605 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   3606 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   3607 		wm_eeprom_recvbits(sc, &val, 8);
   3608 		if ((val & SPI_SR_RDY) == 0)
   3609 			break;
   3610 	}
   3611 	if (usec >= SPI_MAX_RETRIES) {
   3612 		aprint_error("%s: EEPROM failed to become ready\n",
   3613 		    sc->sc_dev.dv_xname);
   3614 		return (1);
   3615 	}
   3616 	return (0);
   3617 }
   3618 
   3619 /*
   3620  * wm_read_eeprom_spi:
   3621  *
 *	Read a word from the EEPROM using the SPI protocol.
   3623  */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	/* Returns 0 on success, 1 if the device never became ready. */
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* Wait for any previous SPI operation to finish. */
	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * Small (8-bit-address) parts encode the high address bit in
	 * the opcode itself for words at offset 128 and above.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* The SPI address is byte-based; words are 16 bits wide. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	/* Sequential read: consecutive words stream out back to back. */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		/* Swap the two bytes of each word into host order. */
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
   3664 
   3665 #define EEPROM_CHECKSUM		0xBABA
   3666 #define EEPROM_SIZE		0x0040
   3667 
   3668 /*
   3669  * wm_validate_eeprom_checksum
   3670  *
   3671  * The checksum is defined as the sum of the first 64 (16 bit) words.
   3672  */
   3673 static int
   3674 wm_validate_eeprom_checksum(struct wm_softc *sc)
   3675 {
   3676 	uint16_t checksum;
   3677 	uint16_t eeprom_data;
   3678 	int i;
   3679 
   3680 	checksum = 0;
   3681 
   3682 	for (i = 0; i < EEPROM_SIZE; i++) {
   3683 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
   3684 			return 1;
   3685 		checksum += eeprom_data;
   3686 	}
   3687 
   3688 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
   3689 		return 1;
   3690 
   3691 	return 0;
   3692 }
   3693 
   3694 /*
   3695  * wm_read_eeprom:
   3696  *
   3697  *	Read data from the serial EEPROM.
   3698  */
   3699 static int
   3700 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   3701 {
   3702 	int rv;
   3703 
   3704 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   3705 		return 1;
   3706 
   3707 	if (wm_acquire_eeprom(sc))
   3708 		return 1;
   3709 
   3710 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
   3711 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
   3712 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   3713 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
   3714 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   3715 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   3716 	else
   3717 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   3718 
   3719 	wm_release_eeprom(sc);
   3720 	return rv;
   3721 }
   3722 
   3723 static int
   3724 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
   3725     uint16_t *data)
   3726 {
   3727 	int i, eerd = 0;
   3728 	int error = 0;
   3729 
   3730 	for (i = 0; i < wordcnt; i++) {
   3731 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   3732 
   3733 		CSR_WRITE(sc, WMREG_EERD, eerd);
   3734 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   3735 		if (error != 0)
   3736 			break;
   3737 
   3738 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   3739 	}
   3740 
   3741 	return error;
   3742 }
   3743 
   3744 static int
   3745 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   3746 {
   3747 	uint32_t attempts = 100000;
   3748 	uint32_t i, reg = 0;
   3749 	int32_t done = -1;
   3750 
   3751 	for (i = 0; i < attempts; i++) {
   3752 		reg = CSR_READ(sc, rw);
   3753 
   3754 		if (reg & EERD_DONE) {
   3755 			done = 0;
   3756 			break;
   3757 		}
   3758 		delay(5);
   3759 	}
   3760 
   3761 	return done;
   3762 }
   3763 
   3764 /*
   3765  * wm_add_rxbuf:
   3766  *
 *	Add a receive buffer to the indicated descriptor.
   3768  */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	/* Returns 0 on success, ENOBUFS if mbuf allocation fails. */
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header plus a cluster for the frame data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop any previous DMA mapping before reusing the slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Offer the whole cluster to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the (re)filled descriptor back to the chip. */
	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   3808 
   3809 /*
   3810  * wm_set_ral:
   3811  *
 *	Set an entry in the receive address list.
   3813  */
   3814 static void
   3815 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3816 {
   3817 	uint32_t ral_lo, ral_hi;
   3818 
   3819 	if (enaddr != NULL) {
   3820 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3821 		    (enaddr[3] << 24);
   3822 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3823 		ral_hi |= RAL_AV;
   3824 	} else {
   3825 		ral_lo = 0;
   3826 		ral_hi = 0;
   3827 	}
   3828 
   3829 	if (sc->sc_type >= WM_T_82544) {
   3830 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3831 		    ral_lo);
   3832 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3833 		    ral_hi);
   3834 	} else {
   3835 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3836 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3837 	}
   3838 }
   3839 
   3840 /*
   3841  * wm_mchash:
   3842  *
   3843  *	Compute the hash of the multicast address for the 4096-bit
   3844  *	multicast filter.
   3845  */
   3846 static uint32_t
   3847 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3848 {
   3849 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3850 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3851 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3852 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3853 	uint32_t hash;
   3854 
   3855 	if (sc->sc_type == WM_T_ICH8) {
   3856 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3857 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3858 		return (hash & 0x3ff);
   3859 	}
   3860 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3861 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3862 
   3863 	return (hash & 0xfff);
   3864 }
   3865 
   3866 /*
   3867  * wm_set_filter:
   3868  *
   3869  *	Set up the receive filter.
   3870  */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* 82544 and newer moved the multicast table to a new offset. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	/* ICH8/ICH9 have a smaller (1024-bit) multicast table. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* High bits select the MTA register, low 5 bits the bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			/* Rewrite the previous register to work around it. */
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   3963 
   3964 /*
   3965  * wm_tbi_mediainit:
   3966  *
   3967  *	Initialize media for use on 1000BASE-X devices.
   3968  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Pick the transmit inter-packet gap appropriate for the chip. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Helper: add one media entry and print its name. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   4010 
   4011 /*
   4012  * wm_tbi_mediastatus:	[ifmedia interface function]
   4013  *
   4014  *	Get the current interface media status on a 1000BASE-X device.
   4015  */
   4016 static void
   4017 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   4018 {
   4019 	struct wm_softc *sc = ifp->if_softc;
   4020 	uint32_t ctrl;
   4021 
   4022 	ifmr->ifm_status = IFM_AVALID;
   4023 	ifmr->ifm_active = IFM_ETHER;
   4024 
   4025 	if (sc->sc_tbi_linkup == 0) {
   4026 		ifmr->ifm_active |= IFM_NONE;
   4027 		return;
   4028 	}
   4029 
   4030 	ifmr->ifm_status |= IFM_ACTIVE;
   4031 	ifmr->ifm_active |= IFM_1000_SX;
   4032 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   4033 		ifmr->ifm_active |= IFM_FDX;
   4034 	ctrl = CSR_READ(sc, WMREG_CTRL);
   4035 	if (ctrl & CTRL_RFCE)
   4036 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   4037 	if (ctrl & CTRL_TFCE)
   4038 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   4039 }
   4040 
   4041 /*
   4042  * wm_tbi_mediachange:	[ifmedia interface function]
   4043  *
   4044  *	Set hardware to newly-selected media on a 1000BASE-X device.
   4045  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Start from the advertisement word stored in the media entry. */
	sc->sc_txcw = ife->ifm_data;
	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
		    sc->sc_dev.dv_xname,sc->sc_txcw));
	/* Advertise symmetric/asymmetric pause when autoneg or flow
	 * control is requested. */
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*If autonegotiation is turned off, force link up and turn on full duplex*/
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    sc->sc_dev.dv_xname,sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* NOTE: CTRL will update TFCE and RFCE automatically. */

	sc->sc_tbi_anstate = 0;

	/* Sample the Loss Of Signal input (SWD pin 1). */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, up to 50 * 10ms = 500ms. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    sc->sc_dev.dv_xname,i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			sc->sc_dev.dv_xname,status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Set the collision distance and XON frame
			 * transmission to match the duplex mode. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
   4152 
   4153 /*
   4154  * wm_tbi_set_linkled:
   4155  *
   4156  *	Update the link LED on 1000BASE-X devices.
   4157  */
   4158 static void
   4159 wm_tbi_set_linkled(struct wm_softc *sc)
   4160 {
   4161 
   4162 	if (sc->sc_tbi_linkup)
   4163 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   4164 	else
   4165 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   4166 
   4167 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4168 }
   4169 
   4170 /*
   4171  * wm_tbi_check_link:
   4172  *
   4173  *	Check the link on 1000BASE-X devices.
   4174  */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate counts down ticks while autonegotiation is
	 * allowed to settle; only check the link when it reaches 1.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Update collision distance and XON frame transmission
		 * to match the negotiated duplex mode. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		sc->sc_fcrtl &= ~FCRTL_XONE;
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		if (ctrl & CTRL_TFCE)
			sc->sc_fcrtl |= FCRTL_XONE;
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			      WMREG_OLD_FCRTL : WMREG_FCRTL,
			      sc->sc_fcrtl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
   4223 
   4224 /*
   4225  * wm_gmii_reset:
   4226  *
   4227  *	Reset the PHY.
   4228  */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	/* Chips that share the PHY with firmware need a semaphore first. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		if (wm_get_swfwhw_semaphore(sc))
			return;
	}
	if (sc->sc_type == WM_T_80003) {
		/* Dual-port part: pick the semaphore for our function. */
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		if (wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
			return;
	}
	if (sc->sc_type >= WM_T_82544) {
		/* Pulse the dedicated PHY reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Pin high (not reset), low (reset), then high again. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
	/* Release whichever semaphore was taken above. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_type == WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
   4284 
   4285 /*
   4286  * wm_gmii_mediainit:
   4287  *
   4288  *	Initialize media for use on 1000BASE-T devices.
   4289  */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	/* Pick the transmit inter-packet gap appropriate for the chip. */
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Select the MDIO access routines this chip generation needs. */
	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		/* 82543 bit-bangs MDIO through SWD pins. */
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	/* If no PHY was found, fall back to a "none" media entry. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   4341 
   4342 /*
   4343  * wm_gmii_mediastatus:	[ifmedia interface function]
   4344  *
   4345  *	Get the current interface media status on a 1000BASE-T device.
   4346  */
   4347 static void
   4348 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   4349 {
   4350 	struct wm_softc *sc = ifp->if_softc;
   4351 
   4352 	ether_mediastatus(ifp, ifmr);
   4353 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
   4354 			   sc->sc_flowflags;
   4355 }
   4356 
   4357 /*
   4358  * wm_gmii_mediachange:	[ifmedia interface function]
   4359  *
   4360  *	Set hardware to newly-selected media on a 1000BASE-T device.
   4361  */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		/* Let the MAC follow the PHY's speed/duplex signals. */
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* 82543 with fixed media: force speed/duplex on the MAC. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch(IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	/* ENXIO just means "no PHY attached"; treat that as success. */
	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
   4405 
   4406 #define	MDI_IO		CTRL_SWDPIN(2)
   4407 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   4408 #define	MDI_CLK		CTRL_SWDPIN(3)
   4409 
   4410 static void
   4411 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   4412 {
   4413 	uint32_t i, v;
   4414 
   4415 	v = CSR_READ(sc, WMREG_CTRL);
   4416 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   4417 	v |= MDI_DIR | CTRL_SWDPIO(3);
   4418 
   4419 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   4420 		if (data & i)
   4421 			v |= MDI_IO;
   4422 		else
   4423 			v &= ~MDI_IO;
   4424 		CSR_WRITE(sc, WMREG_CTRL, v);
   4425 		delay(10);
   4426 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   4427 		delay(10);
   4428 		CSR_WRITE(sc, WMREG_CTRL, v);
   4429 		delay(10);
   4430 	}
   4431 }
   4432 
   4433 static uint32_t
   4434 i82543_mii_recvbits(struct wm_softc *sc)
   4435 {
   4436 	uint32_t v, i, data = 0;
   4437 
   4438 	v = CSR_READ(sc, WMREG_CTRL);
   4439 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   4440 	v |= CTRL_SWDPIO(3);
   4441 
   4442 	CSR_WRITE(sc, WMREG_CTRL, v);
   4443 	delay(10);
   4444 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   4445 	delay(10);
   4446 	CSR_WRITE(sc, WMREG_CTRL, v);
   4447 	delay(10);
   4448 
   4449 	for (i = 0; i < 16; i++) {
   4450 		data <<= 1;
   4451 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   4452 		delay(10);
   4453 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   4454 			data |= 1;
   4455 		CSR_WRITE(sc, WMREG_CTRL, v);
   4456 		delay(10);
   4457 	}
   4458 
   4459 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   4460 	delay(10);
   4461 	CSR_WRITE(sc, WMREG_CTRL, v);
   4462 	delay(10);
   4463 
   4464 	return (data);
   4465 }
   4466 
   4467 #undef MDI_IO
   4468 #undef MDI_DIR
   4469 #undef MDI_CLK
   4470 
   4471 /*
   4472  * wm_gmii_i82543_readreg:	[mii interface function]
   4473  *
   4474  *	Read a PHY register on the GMII (i82543 version).
   4475  */
   4476 static int
   4477 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
   4478 {
   4479 	struct wm_softc *sc = (void *) self;
   4480 	int rv;
   4481 
   4482 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   4483 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   4484 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   4485 	rv = i82543_mii_recvbits(sc) & 0xffff;
   4486 
   4487 	DPRINTF(WM_DEBUG_GMII,
   4488 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   4489 	    sc->sc_dev.dv_xname, phy, reg, rv));
   4490 
   4491 	return (rv);
   4492 }
   4493 
   4494 /*
   4495  * wm_gmii_i82543_writereg:	[mii interface function]
   4496  *
   4497  *	Write a PHY register on the GMII (i82543 version).
   4498  */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/* 32 ones of preamble, then the full 32-bit write frame
	 * (start, opcode, phy, reg, turnaround, data). */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
   4509 
   4510 /*
   4511  * wm_gmii_i82544_readreg:	[mii interface function]
   4512  *
   4513  *	Read a PHY register on the GMII.
   4514  */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	/* Kick off the read via the MDI control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion: up to 320 * 10us = 3.2ms. */
	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones data presumably means no PHY responded;
		 * treat it as zero. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
   4550 
   4551 /*
   4552  * wm_gmii_i82544_writereg:	[mii interface function]
   4553  *
   4554  *	Write a PHY register on the GMII.
   4555  */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	/* Kick off the write via the MDI control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion: up to 320 * 10us = 3.2ms. */
	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	/* Write failures are only logged; callers get no error indication. */
	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
   4580 
   4581 /*
   4582  * wm_gmii_i80003_readreg:	[mii interface function]
   4583  *
   4584  *	Read a PHY register on the kumeran
   4585  * This could be handled by the PHY layer if we didn't have to lock the
   4586  * ressource ...
   4587  */
static int
wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	/* Pick the PHY semaphore matching this PCI function. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	/* Select the GG82563 register page, using the alternate
	 * page-select register for the higher register addresses. */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
   4613 
   4614 /*
   4615  * wm_gmii_i80003_writereg:	[mii interface function]
   4616  *
   4617  *	Write a PHY register on the kumeran.
   4618  * This could be handled by the PHY layer if we didn't have to lock the
   4619  * ressource ...
   4620  */
static void
wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	/* Pick the PHY semaphore matching this PCI function. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	/* Select the GG82563 register page, using the alternate
	 * page-select register for the higher register addresses. */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
   4644 
   4645 /*
   4646  * wm_gmii_statchg:	[mii interface function]
   4647  *
   4648  *	Callback from MII layer when media changes.
   4649  */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	/* Clear the flow-control, collision-distance and XON bits
	 * before rebuilding them from the new media status. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Translate the negotiated pause flags into device register bits. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on the negotiated duplex. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else  {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	/* Pre-82543 chips keep FCRTL at the old register offset. */
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
						 : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		/* On 80003, also update the Kumeran half-duplex control
		 * and the inter-packet gap for the new link speed. */
		switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
   4708 
   4709 /*
   4710  * wm_kmrn_i80003_readreg:
   4711  *
   4712  *	Read a kumeran register
   4713  */
static int
wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
{
	/* Pick the PHY semaphore matching this PCI function. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	/* Write the register offset with the read-enable bit set,
	 * then read the data back after a short settle delay. */
	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
   4732 
   4733 /*
   4734  * wm_kmrn_i80003_writereg:
   4735  *
   4736  *	Write a kumeran register
   4737  */
static void
wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
{
	/* Pick the PHY semaphore matching this PCI function. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	/* One write carries both the register offset and the data. */
	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
   4751 
   4752 static int
   4753 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
   4754 {
   4755 	uint32_t eecd = 0;
   4756 
   4757 	if (sc->sc_type == WM_T_82573) {
   4758 		eecd = CSR_READ(sc, WMREG_EECD);
   4759 
   4760 		/* Isolate bits 15 & 16 */
   4761 		eecd = ((eecd >> 15) & 0x03);
   4762 
   4763 		/* If both bits are set, device is Flash type */
   4764 		if (eecd == 0x03) {
   4765 			return 0;
   4766 		}
   4767 	}
   4768 	return 1;
   4769 }
   4770 
/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore in the SWSM register.
 *	Returns 0 on success, 1 on timeout (after ~50ms).
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		/* Attempt to set SWESMBI; re-read to see if it stuck. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* if we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire EEPROM GNT\n",
		    sc->sc_dev.dv_xname);
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}
   4801 
   4802 static void
   4803 wm_put_swsm_semaphore(struct wm_softc *sc)
   4804 {
   4805 	uint32_t swsm;
   4806 
   4807 	swsm = CSR_READ(sc, WMREG_SWSM);
   4808 	swsm &= ~(SWSM_SWESMBI);
   4809 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   4810 }
   4811 
   4812 static int
   4813 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   4814 {
   4815 	uint32_t swfw_sync;
   4816 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   4817 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   4818 	int timeout = 200;
   4819 
   4820 	for(timeout = 0; timeout < 200; timeout++) {
   4821 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
   4822 			if (wm_get_swsm_semaphore(sc))
   4823 				return 1;
   4824 		}
   4825 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   4826 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   4827 			swfw_sync |= swmask;
   4828 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   4829 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   4830 				wm_put_swsm_semaphore(sc);
   4831 			return 0;
   4832 		}
   4833 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
   4834 			wm_put_swsm_semaphore(sc);
   4835 		delay(5000);
   4836 	}
   4837 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   4838 	    sc->sc_dev.dv_xname, mask, swfw_sync);
   4839 	return 1;
   4840 }
   4841 
/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software half of the SW/FW sync semaphore for the
 *	resources in `mask'.
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	/* SW_FW_SYNC is itself protected by SWSM; spin until we get it. */
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	/* Clear our (software) ownership bits for `mask'. */
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
   4857 
   4858 static int
   4859 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   4860 {
   4861 	uint32_t ext_ctrl;
   4862 	int timeout = 200;
   4863 
   4864 	for(timeout = 0; timeout < 200; timeout++) {
   4865 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4866 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
   4867 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   4868 
   4869 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4870 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
   4871 			return 0;
   4872 		delay(5000);
   4873 	}
   4874 	printf("%s: failed to get swfwgw semaphore ext_ctrl 0x%x\n",
   4875 	    sc->sc_dev.dv_xname, ext_ctrl);
   4876 	return 1;
   4877 }
   4878 
   4879 static void
   4880 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   4881 {
   4882 	uint32_t ext_ctrl;
   4883 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   4884 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
   4885 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   4886 }
   4887 
   4888 /******************************************************************************
   4889  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   4890  * register.
   4891  *
   4892  * sc - Struct containing variables accessed by shared code
   4893  * offset - offset of word in the EEPROM to read
   4894  * data - word read from the EEPROM
   4895  * words - number of words to read
   4896  *****************************************************************************/
/* Returns 0 on success, non-zero on semaphore or flash-read failure. */
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
    int32_t  error = 0;
    uint32_t flash_bank = 0;
    uint32_t act_offset = 0;
    uint32_t bank_offset = 0;
    uint16_t word = 0;
    uint16_t i = 0;

    /* We need to know which is the valid flash bank.  In the event
     * that we didn't allocate eeprom_shadow_ram, we may not be
     * managing flash_bank.  So it cannot be trusted and needs
     * to be updated with each read.
     */
    /* Value of bit 22 corresponds to the flash bank we're on. */
    flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;

    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
    bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

    /* Hold the flash software flag across the whole multi-word read. */
    error = wm_get_swfwhw_semaphore(sc);
    if (error)
        return error;

    for (i = 0; i < words; i++) {
            /* The NVM part needs a byte offset, hence * 2 */
            act_offset = bank_offset + ((offset + i) * 2);
            error = wm_read_ich8_word(sc, act_offset, &word);
            if (error)
                break;
            data[i] = word;
    }

    wm_put_swfwhw_semaphore(sc);
    return error;
}
   4934 
   4935 /******************************************************************************
   4936  * This function does initial flash setup so that a new read/write/erase cycle
   4937  * can be started.
   4938  *
   4939  * sc - The pointer to the hw structure
   4940  ****************************************************************************/
/* Returns 0 when the flash is ready for a new cycle, 1 on failure. */
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
    uint16_t hsfsts;
    int32_t error = 1;
    int32_t i     = 0;

    hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

    /* May be check the Flash Des Valid bit in Hw status */
    if ((hsfsts & HSFSTS_FLDVAL) == 0) {
        return error;
    }

    /* Clear FCERR in Hw status by writing 1 */
    /* Clear DAEL in Hw status by writing a 1 */
    hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

    ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

    /* Either we should have a hardware SPI cycle in progress bit to check
     * against, in order to start a new cycle or FDONE bit should be changed
     * in the hardware so that it is 1 after harware reset, which can then be
     * used as an indication whether a cycle is in progress or has been
     * completed .. we should also have some software semaphore mechanism to
     * guard FDONE or the cycle in progress bit so that two threads access to
     * those bits can be sequentiallized or a way so that 2 threads dont
     * start the cycle at the same time */

    if ((hsfsts & HSFSTS_FLINPRO) == 0) {
        /* There is no cycle running at present, so we can start a cycle */
        /* Begin by setting Flash Cycle Done. */
        hsfsts |= HSFSTS_DONE;
        ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
        error = 0;
    } else {
        /* otherwise poll for sometime so the current cycle has a chance
         * to end before giving up. */
        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
            hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
            if ((hsfsts & HSFSTS_FLINPRO) == 0) {
                error = 0;
                break;
            }
            delay(1);
        }
        if (error == 0) {
            /* Successful in waiting for previous cycle to timeout,
             * now set the Flash Cycle Done. */
            hsfsts |= HSFSTS_DONE;
            ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
        }
        /* NOTE: if the wait loop above timed out, error is still 1 and
         * is returned to the caller to indicate failure. */
    }
    return error;
}
   4996 
   4997 /******************************************************************************
   4998  * This function starts a flash cycle and waits for its completion
   4999  *
   5000  * sc - The pointer to the hw structure
   5001  ****************************************************************************/
   5002 static int32_t
   5003 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   5004 {
   5005     uint16_t hsflctl;
   5006     uint16_t hsfsts;
   5007     int32_t error = 1;
   5008     uint32_t i = 0;
   5009 
   5010     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   5011     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   5012     hsflctl |= HSFCTL_GO;
   5013     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   5014 
   5015     /* wait till FDONE bit is set to 1 */
   5016     do {
   5017         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   5018         if (hsfsts & HSFSTS_DONE)
   5019             break;
   5020         delay(1);
   5021         i++;
   5022     } while (i < timeout);
   5023     if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
   5024         error = 0;
   5025     }
   5026     return error;
   5027 }
   5028 
   5029 /******************************************************************************
   5030  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   5031  *
   5032  * sc - The pointer to the hw structure
   5033  * index - The index of the byte or word to read.
   5034  * size - Size of data to read, 1=byte 2=word
   5035  * data - Pointer to the word to store the value read.
   5036  *****************************************************************************/
/* Returns 0 on success, 1 on bad arguments or a failed/retried-out cycle. */
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
                     uint32_t size, uint16_t* data)
{
    uint16_t hsfsts;
    uint16_t hsflctl;
    uint32_t flash_linear_address;
    uint32_t flash_data = 0;
    int32_t error = 1;
    int32_t count = 0;

    /* Only 1- or 2-byte reads to a non-NULL buffer within the linear
     * address space are valid. */
    if (size < 1  || size > 2 || data == 0x0 ||
        index > ICH_FLASH_LINEAR_ADDR_MASK)
        return error;

    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                           sc->sc_ich8_flash_base;

    do {
        delay(1);
        /* Steps */
        error = wm_ich8_cycle_init(sc);
        if (error)
            break;

        hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
        hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
        hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
        ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

        /* Write the last 24 bits of index into Flash Linear address field in
         * Flash Address */
        /* TODO: TBD maybe check the index against the size of flash */

        ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

        error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

        /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
         * sequence a few more times, else read in (shift in) the Flash Data0,
         * the order is least significant byte first msb to lsb */
        if (error == 0) {
            flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
            if (size == 1) {
                *data = (uint8_t)(flash_data & 0x000000FF);
            } else if (size == 2) {
                *data = (uint16_t)(flash_data & 0x0000FFFF);
            }
            break;
        } else {
            /* If we've gotten here, then things are probably completely hosed,
             * but if the error condition is detected, it won't hurt to give
             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
             */
            hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
            if (hsfsts & HSFSTS_ERR) {
                /* Repeat for some time before giving up. */
                continue;
            } else if ((hsfsts & HSFSTS_DONE) == 0) {
                /* Cycle never completed and no error bit: give up. */
                break;
            }
        }
    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

    return error;
}
   5104 
   5105 #if 0
   5106 /******************************************************************************
   5107  * Reads a single byte from the NVM using the ICH8 flash access registers.
   5108  *
   5109  * sc - pointer to wm_hw structure
   5110  * index - The index of the byte to read.
   5111  * data - Pointer to a byte to store the value read.
   5112  *****************************************************************************/
/* Currently compiled out (see the #if 0 above); kept for reference. */
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
    int32_t status;
    uint16_t word = 0;

    /* A byte read is a 1-byte data read; the low byte holds the value. */
    status = wm_read_ich8_data(sc, index, 1, &word);
    if (status == 0) {
        *data = (uint8_t)word;
    }

    return status;
}
   5126 #endif
   5127 
   5128 /******************************************************************************
   5129  * Reads a word from the NVM using the ICH8 flash access registers.
   5130  *
   5131  * sc - pointer to wm_hw structure
   5132  * index - The starting byte index of the word to read.
   5133  * data - Pointer to a word to store the value read.
   5134  *****************************************************************************/
   5135 static int32_t
   5136 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   5137 {
   5138     int32_t status;
   5139 
   5140     status = wm_read_ich8_data(sc, index, 2, data);
   5141     return status;
   5142 }
   5143