/*	$NetBSD: if_wm.c,v 1.69 2004/04/09 17:51:18 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.69 2004/04/09 17:51:18 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 40 DMA segments per
 * packet (there have been reports of jumbo frame packets with as
 * many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

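/*
 * Illustrative sketch (not part of the driver): because WM_NTXDESC and
 * WM_TXQUEUELEN are powers of two, WM_NEXTTX() and WM_NEXTTXS() can wrap
 * a ring index with a cheap AND-mask instead of a modulo.  The
 * hypothetical demo below shows the wrap at the end of the Tx ring.
 */
#if 0
static void
wm_ring_index_demo(void)
{
	int idx = WM_NTXDESC - 1;	/* last slot in the ring: 255 */

	idx = WM_NEXTTX(idx);		/* (255 + 1) & 0xff == 0 */
	KASSERT(idx == 0);
}
#endif
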
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

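/*
 * Usage sketch (hypothetical, for illustration only): the Rx chain macros
 * implement the classic tail-pointer idiom, appending an mbuf in O(1)
 * without walking the chain.  A reset followed by two links leaves
 * sc_rxhead -> m1 -> m2 -> NULL, with sc_rxtailp pointing at m2's m_next
 * field, ready for the next append.
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead = NULL, sc_rxlen = 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead = m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2 */
#endif
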
/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

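/*
 * Worked example (follows directly from the definitions above): because
 * the Tx and Rx rings live in one DMA-contiguous clump, a descriptor's
 * bus address is just the clump's base address plus its structure offset.
 * For Rx descriptor 10:
 *
 *	WM_CDRXOFF(10) == offsetof(struct wm_control_data,
 *	    wcd_rxdescs[10])
 *	WM_CDRXADDR(sc, 10) == (sc)->sc_cddma + WM_CDRXOFF(10)
 *
 * The _LO/_HI variants split that address into the two 32-bit halves the
 * descriptor format wants; on ports where bus_addr_t is 32 bits the high
 * half is constant zero.
 */
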
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

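/*
 * Worked example (illustrative): syncing 4 descriptors starting at
 * index 254 of the 256-entry ring is split into two bus_dmamap_sync()
 * calls, one covering entries 254-255 and one covering entries 0-1,
 * since a single call cannot describe a region that wraps past the end
 * of the ring.
 */
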
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

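/*
 * Alignment sketch (hypothetical numbers, not part of the driver): an
 * Ethernet header is 14 bytes, so with sc_align_tweak == 2 the IP header
 * starts at buffer offset 16, a 4-byte boundary, which strict-alignment
 * CPUs require for word loads.  The cost is that only (MCLBYTES - 2)
 * bytes of the 2K buffer remain usable, which is why the tweak is
 * disabled for jumbo-sized frames.
 */
#if 0
	int off = sc->sc_align_tweak + ETHER_HDR_LEN;	/* 2 + 14 == 16 */
	KASSERT((off & 3) == 0);	/* IP header is 4-byte aligned */
#endif
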
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

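/*
 * Sketch (illustrative only): wm_set_dma_addr() stores a bus address as
 * two little-endian 32-bit words.  A 64-bit address such as 0x123456789
 * ends up as wa_low = htole32(0x23456789) and wa_high = htole32(0x1).
 * On ports where bus_addr_t is 32 bits the shift is compiled out and
 * wa_high is always zero.
 */
#if 0
	wiseman_addr_t wa;

	wm_set_dma_addr(&wa, (bus_addr_t) 0x123456789ULL);
	/* wa.wa_low == htole32(0x23456789), wa.wa_high == htole32(1) */
#endif
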
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
				      sizeof(struct wm_control_data),
				      PAGE_SIZE, (bus_size_t) 0x100000000ULL,
				      &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
				    sizeof(struct wm_control_data),
				    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
				       sizeof(struct wm_control_data), 1,
				       sizeof(struct wm_control_data), 0, 0,
				       &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
				     sc->sc_control_data,
				     sizeof(struct wm_control_data), NULL,
				     0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
					       WM_NTXSEGS, MCLBYTES, 0, 0,
					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					       MCLBYTES, 0, 0,
					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

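	/*
	 * These are ordinary NetBSD event counters; when the driver is
	 * built with WM_EVENT_COUNTERS defined, their values can be
	 * inspected at run time with "vmstat -e".
	 */
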
	/*
	 * Make sure the interface is shut down during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}

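/*
 * Worked example (hypothetical values): for a plain IPv4/TCP frame with
 * no VLAN tag, offset starts at ETHER_HDR_LEN (14) and iphl is typically
 * 20, so the context descriptor fields would be
 *
 *	ipcs: IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum),
 *	      IPCSE = 33  (last byte of the IP header)
 *	tucs: TUCSS = 34, TUCSO = 34 + offsetof(struct tcphdr, th_sum),
 *	      TUCSE = 0   (checksum runs to the end of the packet)
 *
 * Caching ipcs/tucs in the softc lets back-to-back packets with the same
 * layout skip re-loading the context descriptor entirely.
 */
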
   1433 /*
   1434  * wm_start:		[ifnet interface function]
   1435  *
   1436  *	Start packet transmission on the interface.
   1437  */
   1438 static void
   1439 wm_start(struct ifnet *ifp)
   1440 {
   1441 	struct wm_softc *sc = ifp->if_softc;
   1442 	struct mbuf *m0;
   1443 #if 0 /* XXXJRT */
   1444 	struct m_tag *mtag;
   1445 #endif
   1446 	struct wm_txsoft *txs;
   1447 	bus_dmamap_t dmamap;
   1448 	int error, nexttx, lasttx = -1, ofree, seg;
   1449 	uint32_t cksumcmd;
   1450 	uint8_t cksumfields;
   1451 
   1452 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   1453 		return;
   1454 
   1455 	/*
   1456 	 * Remember the previous number of free descriptors.
   1457 	 */
   1458 	ofree = sc->sc_txfree;
   1459 
   1460 	/*
   1461 	 * Loop through the send queue, setting up transmit descriptors
   1462 	 * until we drain the queue, or use up all available transmit
   1463 	 * descriptors.
   1464 	 */
   1465 	for (;;) {
   1466 		/* Grab a packet off the queue. */
   1467 		IFQ_POLL(&ifp->if_snd, m0);
   1468 		if (m0 == NULL)
   1469 			break;
   1470 
   1471 		DPRINTF(WM_DEBUG_TX,
   1472 		    ("%s: TX: have packet to transmit: %p\n",
   1473 		    sc->sc_dev.dv_xname, m0));
   1474 
   1475 		/* Get a work queue entry. */
   1476 		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
   1477 			wm_txintr(sc);
   1478 			if (sc->sc_txsfree == 0) {
   1479 				DPRINTF(WM_DEBUG_TX,
   1480 				    ("%s: TX: no free job descriptors\n",
   1481 					sc->sc_dev.dv_xname));
   1482 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   1483 				break;
   1484 			}
   1485 		}
   1486 
   1487 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   1488 		dmamap = txs->txs_dmamap;
   1489 
   1490 		/*
   1491 		 * Load the DMA map.  If this fails, the packet either
   1492 		 * didn't fit in the allotted number of segments, or we
   1493 		 * were short on resources.  For the too-many-segments
   1494 		 * case, we simply report an error and drop the packet,
   1495 		 * since we can't sanely copy a jumbo packet to a single
   1496 		 * buffer.
   1497 		 */
   1498 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   1499 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1500 		if (error) {
   1501 			if (error == EFBIG) {
   1502 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   1503 				printf("%s: Tx packet consumes too many "
   1504 				    "DMA segments, dropping...\n",
   1505 				    sc->sc_dev.dv_xname);
   1506 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   1507 				m_freem(m0);
   1508 				continue;
   1509 			}
   1510 			/*
   1511 			 * Short on resources, just stop for now.
   1512 			 */
   1513 			DPRINTF(WM_DEBUG_TX,
   1514 			    ("%s: TX: dmamap load failed: %d\n",
   1515 			    sc->sc_dev.dv_xname, error));
   1516 			break;
   1517 		}
   1518 
   1519 		/*
   1520 		 * Ensure we have enough descriptors free to describe
   1521 		 * the packet.  Note, we always reserve one descriptor
   1522 		 * at the end of the ring due to the semantics of the
   1523 		 * TDT register, plus one more in the event we need
   1524 		 * to re-load checksum offload context.
   1525 		 */
   1526 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
   1527 			/*
   1528 			 * Not enough free descriptors to transmit this
   1529 			 * packet.  We haven't committed anything yet,
   1530 			 * so just unload the DMA map, put the packet
   1531 			 * pack on the queue, and punt.  Notify the upper
   1532 			 * layer that there are no more slots left.
   1533 			 */
   1534 			DPRINTF(WM_DEBUG_TX,
   1535 			    ("%s: TX: need %d descriptors, have %d\n",
   1536 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
   1537 			    sc->sc_txfree - 1));
   1538 			ifp->if_flags |= IFF_OACTIVE;
   1539 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   1540 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   1541 			break;
   1542 		}
   1543 
   1544 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   1545 
   1546 		/*
   1547 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   1548 		 */
   1549 
   1550 		/* Sync the DMA map. */
   1551 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   1552 		    BUS_DMASYNC_PREWRITE);
   1553 
   1554 		DPRINTF(WM_DEBUG_TX,
   1555 		    ("%s: TX: packet has %d DMA segments\n",
   1556 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
   1557 
   1558 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   1559 
   1560 		/*
   1561 		 * Store a pointer to the packet so that we can free it
   1562 		 * later.
   1563 		 *
    1564 		 * Initially, we take the number of descriptors the packet
    1565 		 * uses to be the number of DMA segments.  This may be
   1566 		 * incremented by 1 if we do checksum offload (a descriptor
   1567 		 * is used to set the checksum context).
   1568 		 */
   1569 		txs->txs_mbuf = m0;
   1570 		txs->txs_firstdesc = sc->sc_txnext;
   1571 		txs->txs_ndesc = dmamap->dm_nsegs;
   1572 
   1573 		/*
   1574 		 * Set up checksum offload parameters for
   1575 		 * this packet.
   1576 		 */
   1577 		if (m0->m_pkthdr.csum_flags &
   1578 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1579 			if (wm_tx_cksum(sc, txs, &cksumcmd,
   1580 					&cksumfields) != 0) {
   1581 				/* Error message already displayed. */
   1582 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   1583 				continue;
   1584 			}
   1585 		} else {
   1586 			cksumcmd = 0;
   1587 			cksumfields = 0;
   1588 		}
   1589 
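         		/* Request a delayed interrupt for this packet (IDE). */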
   1590 		cksumcmd |= WTX_CMD_IDE;
   1591 
   1592 		/*
   1593 		 * Initialize the transmit descriptor.
   1594 		 */
   1595 		for (nexttx = sc->sc_txnext, seg = 0;
   1596 		     seg < dmamap->dm_nsegs;
   1597 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
   1598 			wm_set_dma_addr(&sc->sc_txdescs[nexttx].wtx_addr,
   1599 			    dmamap->dm_segs[seg].ds_addr);
   1600 			sc->sc_txdescs[nexttx].wtx_cmdlen =
   1601 			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
   1602 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
   1603 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
   1604 			    cksumfields;
   1605 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
   1606 			lasttx = nexttx;
   1607 
   1608 			DPRINTF(WM_DEBUG_TX,
   1609 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
   1610 			    sc->sc_dev.dv_xname, nexttx,
   1611 			    le32toh(dmamap->dm_segs[seg].ds_addr),
   1612 			    le32toh(dmamap->dm_segs[seg].ds_len)));
   1613 		}
   1614 
   1615 		KASSERT(lasttx != -1);
   1616 
   1617 		/*
   1618 		 * Set up the command byte on the last descriptor of
   1619 		 * the packet.  If we're in the interrupt delay window,
   1620 		 * delay the interrupt.
   1621 		 */
   1622 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1623 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
   1624 
   1625 #if 0 /* XXXJRT */
   1626 		/*
   1627 		 * If VLANs are enabled and the packet has a VLAN tag, set
   1628 		 * up the descriptor to encapsulate the packet for us.
   1629 		 *
   1630 		 * This is only valid on the last descriptor of the packet.
   1631 		 */
   1632 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   1633 		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
   1634 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1635 			    htole32(WTX_CMD_VLE);
   1636 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
   1637 			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
   1638 		}
   1639 #endif /* XXXJRT */
   1640 
   1641 		txs->txs_lastdesc = lasttx;
   1642 
   1643 		DPRINTF(WM_DEBUG_TX,
   1644 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
   1645 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
   1646 
   1647 		/* Sync the descriptors we're using. */
   1648 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
   1649 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1650 
   1651 		/* Give the packet to the chip. */
   1652 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   1653 
   1654 		DPRINTF(WM_DEBUG_TX,
   1655 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
   1656 
   1657 		DPRINTF(WM_DEBUG_TX,
   1658 		    ("%s: TX: finished transmitting packet, job %d\n",
   1659 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
   1660 
   1661 		/* Advance the tx pointer. */
   1662 		sc->sc_txfree -= txs->txs_ndesc;
   1663 		sc->sc_txnext = nexttx;
   1664 
   1665 		sc->sc_txsfree--;
   1666 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
   1667 
   1668 #if NBPFILTER > 0
   1669 		/* Pass the packet to any BPF listeners. */
   1670 		if (ifp->if_bpf)
   1671 			bpf_mtap(ifp->if_bpf, m0);
   1672 #endif /* NBPFILTER > 0 */
   1673 	}
   1674 
   1675 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   1676 		/* No more slots; notify upper layer. */
   1677 		ifp->if_flags |= IFF_OACTIVE;
   1678 	}
   1679 
   1680 	if (sc->sc_txfree != ofree) {
   1681 		/* Set a watchdog timer in case the chip flakes out. */
   1682 		ifp->if_timer = 5;
   1683 	}
   1684 }
   1685 
   1686 /*
   1687  * wm_watchdog:		[ifnet interface function]
   1688  *
   1689  *	Watchdog timer handler.
   1690  */
   1691 static void
   1692 wm_watchdog(struct ifnet *ifp)
   1693 {
   1694 	struct wm_softc *sc = ifp->if_softc;
   1695 
   1696 	/*
   1697 	 * Since we're using delayed interrupts, sweep up
   1698 	 * before we report an error.
   1699 	 */
   1700 	wm_txintr(sc);
   1701 
   1702 	if (sc->sc_txfree != WM_NTXDESC) {
   1703 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1704 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1705 		    sc->sc_txnext);
   1706 		ifp->if_oerrors++;
   1707 
   1708 		/* Reset the interface. */
   1709 		(void) wm_init(ifp);
   1710 	}
   1711 
   1712 	/* Try to get more packets going. */
   1713 	wm_start(ifp);
   1714 }
   1715 
   1716 /*
   1717  * wm_ioctl:		[ifnet interface function]
   1718  *
   1719  *	Handle control requests from the operator.
   1720  */
   1721 static int
   1722 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1723 {
   1724 	struct wm_softc *sc = ifp->if_softc;
   1725 	struct ifreq *ifr = (struct ifreq *) data;
   1726 	int s, error;
   1727 
   1728 	s = splnet();
   1729 
   1730 	switch (cmd) {
   1731 	case SIOCSIFMEDIA:
   1732 	case SIOCGIFMEDIA:
   1733 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1734 		break;
   1735 	default:
   1736 		error = ether_ioctl(ifp, cmd, data);
   1737 		if (error == ENETRESET) {
   1738 			/*
   1739 			 * Multicast list has changed; set the hardware filter
   1740 			 * accordingly.
   1741 			 */
   1742 			wm_set_filter(sc);
   1743 			error = 0;
   1744 		}
   1745 		break;
   1746 	}
   1747 
   1748 	/* Try to get more packets going. */
   1749 	wm_start(ifp);
   1750 
   1751 	splx(s);
   1752 	return (error);
   1753 }
   1754 
   1755 /*
   1756  * wm_intr:
   1757  *
   1758  *	Interrupt service routine.
   1759  */
   1760 static int
   1761 wm_intr(void *arg)
   1762 {
   1763 	struct wm_softc *sc = arg;
   1764 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1765 	uint32_t icr;
   1766 	int wantinit, handled = 0;
   1767 
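         	/*
         	 * ICR is clear-on-read; each pass of the loop handles
         	 * whatever causes have accumulated since the last read.
         	 */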
   1768 	for (wantinit = 0; wantinit == 0;) {
   1769 		icr = CSR_READ(sc, WMREG_ICR);
   1770 		if ((icr & sc->sc_icr) == 0)
   1771 			break;
   1772 
   1773 #if 0 /*NRND > 0*/
   1774 		if (RND_ENABLED(&sc->rnd_source))
   1775 			rnd_add_uint32(&sc->rnd_source, icr);
   1776 #endif
   1777 
   1778 		handled = 1;
   1779 
   1780 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1781 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   1782 			DPRINTF(WM_DEBUG_RX,
   1783 			    ("%s: RX: got Rx intr 0x%08x\n",
   1784 			    sc->sc_dev.dv_xname,
   1785 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   1786 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   1787 		}
   1788 #endif
   1789 		wm_rxintr(sc);
   1790 
   1791 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1792 		if (icr & ICR_TXDW) {
   1793 			DPRINTF(WM_DEBUG_TX,
   1794 			    ("%s: TX: got TXDW interrupt\n",
   1795 			    sc->sc_dev.dv_xname));
   1796 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   1797 		}
   1798 #endif
   1799 		wm_txintr(sc);
   1800 
   1801 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   1802 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   1803 			wm_linkintr(sc, icr);
   1804 		}
   1805 
   1806 		if (icr & ICR_RXO) {
   1807 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
   1808 			wantinit = 1;
   1809 		}
   1810 	}
   1811 
   1812 	if (handled) {
   1813 		if (wantinit)
   1814 			wm_init(ifp);
   1815 
   1816 		/* Try to get more packets going. */
   1817 		wm_start(ifp);
   1818 	}
   1819 
   1820 	return (handled);
   1821 }
   1822 
   1823 /*
   1824  * wm_txintr:
   1825  *
   1826  *	Helper; handle transmit interrupts.
   1827  */
   1828 static void
   1829 wm_txintr(struct wm_softc *sc)
   1830 {
   1831 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1832 	struct wm_txsoft *txs;
   1833 	uint8_t status;
   1834 	int i;
   1835 
   1836 	ifp->if_flags &= ~IFF_OACTIVE;
   1837 
   1838 	/*
   1839 	 * Go through the Tx list and free mbufs for those
   1840 	 * frames which have been transmitted.
   1841 	 */
   1842 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
   1843 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
   1844 		txs = &sc->sc_txsoft[i];
   1845 
   1846 		DPRINTF(WM_DEBUG_TX,
   1847 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   1848 
   1849 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
   1850 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1851 
   1852 		status =
   1853 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   1854 		if ((status & WTX_ST_DD) == 0) {
   1855 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   1856 			    BUS_DMASYNC_PREREAD);
   1857 			break;
   1858 		}
   1859 
   1860 		DPRINTF(WM_DEBUG_TX,
   1861 		    ("%s: TX: job %d done: descs %d..%d\n",
   1862 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   1863 		    txs->txs_lastdesc));
   1864 
   1865 		/*
   1866 		 * XXX We should probably be using the statistics
   1867 		 * XXX registers, but I don't know if they exist
   1868 		 * XXX on chips before the i82544.
   1869 		 */
   1870 
   1871 #ifdef WM_EVENT_COUNTERS
   1872 		if (status & WTX_ST_TU)
   1873 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   1874 #endif /* WM_EVENT_COUNTERS */
   1875 
   1876 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   1877 			ifp->if_oerrors++;
   1878 			if (status & WTX_ST_LC)
   1879 				printf("%s: late collision\n",
   1880 				    sc->sc_dev.dv_xname);
   1881 			else if (status & WTX_ST_EC) {
   1882 				ifp->if_collisions += 16;
   1883 				printf("%s: excessive collisions\n",
   1884 				    sc->sc_dev.dv_xname);
   1885 			}
   1886 		} else
   1887 			ifp->if_opackets++;
   1888 
   1889 		sc->sc_txfree += txs->txs_ndesc;
   1890 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1891 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1892 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1893 		m_freem(txs->txs_mbuf);
   1894 		txs->txs_mbuf = NULL;
   1895 	}
   1896 
   1897 	/* Update the dirty transmit buffer pointer. */
   1898 	sc->sc_txsdirty = i;
   1899 	DPRINTF(WM_DEBUG_TX,
   1900 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   1901 
   1902 	/*
   1903 	 * If there are no more pending transmissions, cancel the watchdog
   1904 	 * timer.
   1905 	 */
   1906 	if (sc->sc_txsfree == WM_TXQUEUELEN)
   1907 		ifp->if_timer = 0;
   1908 }
   1909 
   1910 /*
   1911  * wm_rxintr:
   1912  *
   1913  *	Helper; handle receive interrupts.
   1914  */
   1915 static void
   1916 wm_rxintr(struct wm_softc *sc)
   1917 {
   1918 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1919 	struct wm_rxsoft *rxs;
   1920 	struct mbuf *m;
   1921 	int i, len;
   1922 	uint8_t status, errors;
   1923 
   1924 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   1925 		rxs = &sc->sc_rxsoft[i];
   1926 
   1927 		DPRINTF(WM_DEBUG_RX,
   1928 		    ("%s: RX: checking descriptor %d\n",
   1929 		    sc->sc_dev.dv_xname, i));
   1930 
   1931 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1932 
   1933 		status = sc->sc_rxdescs[i].wrx_status;
   1934 		errors = sc->sc_rxdescs[i].wrx_errors;
   1935 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   1936 
   1937 		if ((status & WRX_ST_DD) == 0) {
   1938 			/*
   1939 			 * We have processed all of the receive descriptors.
   1940 			 */
   1941 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   1942 			break;
   1943 		}
   1944 
   1945 		if (__predict_false(sc->sc_rxdiscard)) {
   1946 			DPRINTF(WM_DEBUG_RX,
   1947 			    ("%s: RX: discarding contents of descriptor %d\n",
   1948 			    sc->sc_dev.dv_xname, i));
   1949 			WM_INIT_RXDESC(sc, i);
   1950 			if (status & WRX_ST_EOP) {
   1951 				/* Reset our state. */
   1952 				DPRINTF(WM_DEBUG_RX,
   1953 				    ("%s: RX: resetting rxdiscard -> 0\n",
   1954 				    sc->sc_dev.dv_xname));
   1955 				sc->sc_rxdiscard = 0;
   1956 			}
   1957 			continue;
   1958 		}
   1959 
   1960 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1961 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1962 
   1963 		m = rxs->rxs_mbuf;
   1964 
   1965 		/*
   1966 		 * Add a new receive buffer to the ring.
   1967 		 */
   1968 		if (wm_add_rxbuf(sc, i) != 0) {
   1969 			/*
   1970 			 * Failed, throw away what we've done so
   1971 			 * far, and discard the rest of the packet.
   1972 			 */
   1973 			ifp->if_ierrors++;
   1974 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1975 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1976 			WM_INIT_RXDESC(sc, i);
   1977 			if ((status & WRX_ST_EOP) == 0)
   1978 				sc->sc_rxdiscard = 1;
   1979 			if (sc->sc_rxhead != NULL)
   1980 				m_freem(sc->sc_rxhead);
   1981 			WM_RXCHAIN_RESET(sc);
   1982 			DPRINTF(WM_DEBUG_RX,
   1983 			    ("%s: RX: Rx buffer allocation failed, "
   1984 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
   1985 			    sc->sc_rxdiscard ? " (discard)" : ""));
   1986 			continue;
   1987 		}
   1988 
   1989 		WM_RXCHAIN_LINK(sc, m);
   1990 
   1991 		m->m_len = len;
   1992 
   1993 		DPRINTF(WM_DEBUG_RX,
   1994 		    ("%s: RX: buffer at %p len %d\n",
   1995 		    sc->sc_dev.dv_xname, m->m_data, len));
   1996 
   1997 		/*
   1998 		 * If this is not the end of the packet, keep
   1999 		 * looking.
   2000 		 */
   2001 		if ((status & WRX_ST_EOP) == 0) {
   2002 			sc->sc_rxlen += len;
   2003 			DPRINTF(WM_DEBUG_RX,
   2004 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   2005 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
   2006 			continue;
   2007 		}
   2008 
   2009 		/*
   2010 		 * Okay, we have the entire packet now...
   2011 		 */
   2012 		*sc->sc_rxtailp = NULL;
   2013 		m = sc->sc_rxhead;
   2014 		len += sc->sc_rxlen;
   2015 
   2016 		WM_RXCHAIN_RESET(sc);
   2017 
   2018 		DPRINTF(WM_DEBUG_RX,
   2019 		    ("%s: RX: have entire packet, len -> %d\n",
   2020 		    sc->sc_dev.dv_xname, len));
   2021 
   2022 		/*
   2023 		 * If an error occurred, update stats and drop the packet.
   2024 		 */
   2025 		if (errors &
   2026 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   2027 			ifp->if_ierrors++;
   2028 			if (errors & WRX_ER_SE)
   2029 				printf("%s: symbol error\n",
   2030 				    sc->sc_dev.dv_xname);
   2031 			else if (errors & WRX_ER_SEQ)
   2032 				printf("%s: receive sequence error\n",
   2033 				    sc->sc_dev.dv_xname);
   2034 			else if (errors & WRX_ER_CE)
   2035 				printf("%s: CRC error\n",
   2036 				    sc->sc_dev.dv_xname);
   2037 			m_freem(m);
   2038 			continue;
   2039 		}
   2040 
   2041 		/*
   2042 		 * No errors.  Receive the packet.
   2043 		 *
   2044 		 * Note, we have configured the chip to include the
   2045 		 * CRC with every packet.
   2046 		 */
   2047 		m->m_flags |= M_HASFCS;
   2048 		m->m_pkthdr.rcvif = ifp;
   2049 		m->m_pkthdr.len = len;
   2050 
   2051 #if 0 /* XXXJRT */
   2052 		/*
   2053 		 * If VLANs are enabled, VLAN packets have been unwrapped
   2054 		 * for us.  Associate the tag with the packet.
   2055 		 */
   2056 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   2057 		    (status & WRX_ST_VP) != 0) {
   2058 			struct m_tag *vtag;
   2059 
   2060 			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
   2061 			    M_NOWAIT);
   2062 			if (vtag == NULL) {
   2063 				ifp->if_ierrors++;
   2064 				printf("%s: unable to allocate VLAN tag\n",
   2065 				    sc->sc_dev.dv_xname);
   2066 				m_freem(m);
   2067 				continue;
   2068 			}
   2069 
   2070 			*(u_int *)(vtag + 1) =
   2071 			    le16toh(sc->sc_rxdescs[i].wrx_special);
   2072 		}
   2073 #endif /* XXXJRT */
   2074 
   2075 		/*
   2076 		 * Set up checksum info for this packet.
   2077 		 */
   2078 		if (status & WRX_ST_IPCS) {
   2079 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   2080 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   2081 			if (errors & WRX_ER_IPE)
   2082 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   2083 		}
   2084 		if (status & WRX_ST_TCPCS) {
   2085 			/*
   2086 			 * Note: we don't know if this was TCP or UDP,
   2087 			 * so we just set both bits, and expect the
   2088 			 * upper layers to deal.
   2089 			 */
   2090 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   2091 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
   2092 			if (errors & WRX_ER_TCPE)
   2093 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   2094 		}
   2095 
   2096 		ifp->if_ipackets++;
   2097 
   2098 #if NBPFILTER > 0
   2099 		/* Pass this up to any BPF listeners. */
   2100 		if (ifp->if_bpf)
   2101 			bpf_mtap(ifp->if_bpf, m);
   2102 #endif /* NBPFILTER > 0 */
   2103 
   2104 		/* Pass it on. */
   2105 		(*ifp->if_input)(ifp, m);
   2106 	}
   2107 
   2108 	/* Update the receive pointer. */
   2109 	sc->sc_rxptr = i;
   2110 
   2111 	DPRINTF(WM_DEBUG_RX,
   2112 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
   2113 }
   2114 
   2115 /*
   2116  * wm_linkintr:
   2117  *
   2118  *	Helper; handle link interrupts.
   2119  */
   2120 static void
   2121 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   2122 {
   2123 	uint32_t status;
   2124 
   2125 	/*
   2126 	 * If we get a link status interrupt on a 1000BASE-T
   2127 	 * device, just fall into the normal MII tick path.
   2128 	 */
   2129 	if (sc->sc_flags & WM_F_HAS_MII) {
   2130 		if (icr & ICR_LSC) {
   2131 			DPRINTF(WM_DEBUG_LINK,
   2132 			    ("%s: LINK: LSC -> mii_tick\n",
   2133 			    sc->sc_dev.dv_xname));
   2134 			mii_tick(&sc->sc_mii);
   2135 		} else if (icr & ICR_RXSEQ) {
   2136 			DPRINTF(WM_DEBUG_LINK,
   2137 			    ("%s: LINK Receive sequence error\n",
   2138 			    sc->sc_dev.dv_xname));
   2139 		}
   2140 		return;
   2141 	}
   2142 
   2143 	/*
   2144 	 * If we are now receiving /C/, check for link again in
   2145 	 * a couple of link clock ticks.
   2146 	 */
   2147 	if (icr & ICR_RXCFG) {
   2148 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
   2149 		    sc->sc_dev.dv_xname));
   2150 		sc->sc_tbi_anstate = 2;
   2151 	}
   2152 
   2153 	if (icr & ICR_LSC) {
   2154 		status = CSR_READ(sc, WMREG_STATUS);
   2155 		if (status & STATUS_LU) {
   2156 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   2157 			    sc->sc_dev.dv_xname,
   2158 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   2159 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   2160 			if (status & STATUS_FD)
   2161 				sc->sc_tctl |=
   2162 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2163 			else
   2164 				sc->sc_tctl |=
   2165 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   2166 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2167 			sc->sc_tbi_linkup = 1;
   2168 		} else {
   2169 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   2170 			    sc->sc_dev.dv_xname));
   2171 			sc->sc_tbi_linkup = 0;
   2172 		}
   2173 		sc->sc_tbi_anstate = 2;
   2174 		wm_tbi_set_linkled(sc);
   2175 	} else if (icr & ICR_RXSEQ) {
   2176 		DPRINTF(WM_DEBUG_LINK,
   2177 		    ("%s: LINK: Receive sequence error\n",
   2178 		    sc->sc_dev.dv_xname));
   2179 	}
   2180 }
   2181 
   2182 /*
   2183  * wm_tick:
   2184  *
    2185  *	One second timer, used to check link status on both MII
    2186  *	and TBI devices.
   2187  */
   2188 static void
   2189 wm_tick(void *arg)
   2190 {
   2191 	struct wm_softc *sc = arg;
   2192 	int s;
   2193 
   2194 	s = splnet();
   2195 
   2196 	if (sc->sc_flags & WM_F_HAS_MII)
   2197 		mii_tick(&sc->sc_mii);
   2198 	else
   2199 		wm_tbi_check_link(sc);
   2200 
   2201 	splx(s);
   2202 
   2203 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2204 }
   2205 
   2206 /*
   2207  * wm_reset:
   2208  *
    2209  *	Reset the chip.
   2210  */
   2211 static void
   2212 wm_reset(struct wm_softc *sc)
   2213 {
   2214 	int i;
   2215 
   2216 	switch (sc->sc_type) {
   2217 	case WM_T_82544:
   2218 	case WM_T_82540:
   2219 	case WM_T_82545:
   2220 	case WM_T_82546:
   2221 	case WM_T_82541:
   2222 	case WM_T_82541_2:
   2223 		/*
   2224 		 * These chips have a problem with the memory-mapped
   2225 		 * write cycle when issuing the reset, so use I/O-mapped
   2226 		 * access, if possible.
   2227 		 */
   2228 		if (sc->sc_flags & WM_F_IOH_VALID)
   2229 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   2230 		else
   2231 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2232 		break;
   2233 
   2234 	case WM_T_82545_3:
   2235 	case WM_T_82546_3:
   2236 		/* Use the shadow control register on these chips. */
   2237 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   2238 		break;
   2239 
   2240 	default:
   2241 		/* Everything else can safely use the documented method. */
   2242 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2243 		break;
   2244 	}
   2245 	delay(10000);
   2246 
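         	/* Wait (up to 20 ms) for the reset bit to clear itself. */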
   2247 	for (i = 0; i < 1000; i++) {
   2248 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
   2249 			return;
   2250 		delay(20);
   2251 	}
   2252 
   2253 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
   2254 		printf("%s: WARNING: reset failed to complete\n",
   2255 		    sc->sc_dev.dv_xname);
   2256 }
   2257 
   2258 /*
   2259  * wm_init:		[ifnet interface function]
   2260  *
   2261  *	Initialize the interface.  Must be called at splnet().
   2262  */
   2263 static int
   2264 wm_init(struct ifnet *ifp)
   2265 {
   2266 	struct wm_softc *sc = ifp->if_softc;
   2267 	struct wm_rxsoft *rxs;
   2268 	int i, error = 0;
   2269 	uint32_t reg;
   2270 
   2271 	/*
   2272 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   2273 	 * There is a small but measurable benefit to avoiding the adjusment
   2274 	 * of the descriptor so that the headers are aligned, for normal mtu,
   2275 	 * on such platforms.  One possibility is that the DMA itself is
   2276 	 * slightly more efficient if the front of the entire packet (instead
   2277 	 * of the front of the headers) is aligned.
   2278 	 *
   2279 	 * Note we must always set align_tweak to 0 if we are using
   2280 	 * jumbo frames.
   2281 	 */
   2282 #ifdef __NO_STRICT_ALIGNMENT
   2283 	sc->sc_align_tweak = 0;
   2284 #else
   2285 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   2286 		sc->sc_align_tweak = 0;
   2287 	else
   2288 		sc->sc_align_tweak = 2;
   2289 #endif /* __NO_STRICT_ALIGNMENT */
   2290 
   2291 	/* Cancel any pending I/O. */
   2292 	wm_stop(ifp, 0);
   2293 
   2294 	/* Reset the chip to a known state. */
   2295 	wm_reset(sc);
   2296 
   2297 	/* Initialize the transmit descriptor ring. */
   2298 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
   2299 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
   2300 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2301 	sc->sc_txfree = WM_NTXDESC;
   2302 	sc->sc_txnext = 0;
   2303 
   2304 	sc->sc_txctx_ipcs = 0xffffffff;
   2305 	sc->sc_txctx_tucs = 0xffffffff;
   2306 
   2307 	if (sc->sc_type < WM_T_82543) {
   2308 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
   2309 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
   2310 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
   2311 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   2312 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   2313 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   2314 	} else {
   2315 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
   2316 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
   2317 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
   2318 		CSR_WRITE(sc, WMREG_TDH, 0);
   2319 		CSR_WRITE(sc, WMREG_TDT, 0);
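         		/*
         		 * Tx interrupt delay; the count is in units of
         		 * 1.024 usec per the i8254x documentation.
         		 */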
   2320 		CSR_WRITE(sc, WMREG_TIDV, 128);
   2321 
   2322 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   2323 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   2324 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   2325 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   2326 	}
   2327 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   2328 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   2329 
   2330 	/* Initialize the transmit job descriptors. */
   2331 	for (i = 0; i < WM_TXQUEUELEN; i++)
   2332 		sc->sc_txsoft[i].txs_mbuf = NULL;
   2333 	sc->sc_txsfree = WM_TXQUEUELEN;
   2334 	sc->sc_txsnext = 0;
   2335 	sc->sc_txsdirty = 0;
   2336 
   2337 	/*
   2338 	 * Initialize the receive descriptor and receive job
   2339 	 * descriptor rings.
   2340 	 */
   2341 	if (sc->sc_type < WM_T_82543) {
   2342 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
   2343 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
   2344 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   2345 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   2346 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   2347 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   2348 
   2349 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   2350 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   2351 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   2352 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   2353 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   2354 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   2355 	} else {
   2356 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
   2357 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
   2358 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   2359 		CSR_WRITE(sc, WMREG_RDH, 0);
   2360 		CSR_WRITE(sc, WMREG_RDT, 0);
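         		/*
         		 * Rx interrupt delay timer, also in 1.024 usec units;
         		 * FPD asks the chip to flush any partially-filled
         		 * descriptor block.
         		 */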
   2361 		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
   2362 	}
   2363 	for (i = 0; i < WM_NRXDESC; i++) {
   2364 		rxs = &sc->sc_rxsoft[i];
   2365 		if (rxs->rxs_mbuf == NULL) {
   2366 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   2367 				printf("%s: unable to allocate or map rx "
   2368 				    "buffer %d, error = %d\n",
   2369 				    sc->sc_dev.dv_xname, i, error);
   2370 				/*
   2371 				 * XXX Should attempt to run with fewer receive
   2372 				 * XXX buffers instead of just failing.
   2373 				 */
   2374 				wm_rxdrain(sc);
   2375 				goto out;
   2376 			}
   2377 		} else
   2378 			WM_INIT_RXDESC(sc, i);
   2379 	}
   2380 	sc->sc_rxptr = 0;
   2381 	sc->sc_rxdiscard = 0;
   2382 	WM_RXCHAIN_RESET(sc);
   2383 
   2384 	/*
   2385 	 * Clear out the VLAN table -- we don't use it (yet).
   2386 	 */
   2387 	CSR_WRITE(sc, WMREG_VET, 0);
   2388 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   2389 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   2390 
   2391 	/*
   2392 	 * Set up flow-control parameters.
   2393 	 *
   2394 	 * XXX Values could probably stand some tuning.
   2395 	 */
   2396 	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
   2397 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   2398 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   2399 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   2400 
   2401 		if (sc->sc_type < WM_T_82543) {
   2402 			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   2403 			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
   2404 		} else {
   2405 			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   2406 			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
   2407 		}
   2408 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   2409 	}
   2410 
   2411 #if 0 /* XXXJRT */
   2412 	/* Deal with VLAN enables. */
   2413 	if (sc->sc_ethercom.ec_nvlans != 0)
   2414 		sc->sc_ctrl |= CTRL_VME;
   2415 	else
   2416 #endif /* XXXJRT */
   2417 		sc->sc_ctrl &= ~CTRL_VME;
   2418 
   2419 	/* Write the control registers. */
   2420 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2421 #if 0
   2422 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2423 #endif
   2424 
   2425 	/*
   2426 	 * Set up checksum offload parameters.
   2427 	 */
   2428 	reg = CSR_READ(sc, WMREG_RXCSUM);
   2429 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
   2430 		reg |= RXCSUM_IPOFL;
   2431 	else
   2432 		reg &= ~RXCSUM_IPOFL;
   2433 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
   2434 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   2435 	else {
   2436 		reg &= ~RXCSUM_TUOFL;
   2437 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
   2438 			reg &= ~RXCSUM_IPOFL;
   2439 	}
   2440 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   2441 
   2442 	/*
   2443 	 * Set up the interrupt registers.
   2444 	 */
   2445 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   2446 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   2447 	    ICR_RXO | ICR_RXT0;
   2448 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
   2449 		sc->sc_icr |= ICR_RXCFG;
   2450 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   2451 
   2452 	/* Set up the inter-packet gap. */
   2453 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   2454 
   2455 #if 0 /* XXXJRT */
   2456 	/* Set the VLAN ethernetype. */
   2457 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   2458 #endif
   2459 
   2460 	/*
   2461 	 * Set up the transmit control register; we start out with
    2462 	 * a collision distance suitable for FDX, but update it when
   2463 	 * we resolve the media type.
   2464 	 */
   2465 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
   2466 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2467 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2468 
   2469 	/* Set the media. */
   2470 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
   2471 
   2472 	/*
   2473 	 * Set up the receive control register; we actually program
   2474 	 * the register when we set the receive filter.  Use multicast
   2475 	 * address offset type 0.
   2476 	 *
   2477 	 * Only the i82544 has the ability to strip the incoming
   2478 	 * CRC, so we don't enable that feature.
   2479 	 */
   2480 	sc->sc_mchash_type = 0;
   2481 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
   2482 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
   2483 
    2484 	if (MCLBYTES == 2048) {
    2485 		sc->sc_rctl |= RCTL_2k;
    2486 	} else {
    2487 		/*
    2488 		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many
    2489 		 * XXX DMA segments, dropping" -- why?
    2490 		 */
    2491 #if 0
    2492 		if (sc->sc_type >= WM_T_82543) {
    2493 			switch (MCLBYTES) {
   2494 			case 4096:
   2495 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   2496 				break;
   2497 			case 8192:
   2498 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   2499 				break;
   2500 			case 16384:
   2501 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   2502 				break;
   2503 			default:
   2504 				panic("wm_init: MCLBYTES %d unsupported",
   2505 				    MCLBYTES);
   2506 				break;
   2507 			}
   2508 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   2509 #else
   2510 		panic("wm_init: MCLBYTES > 2048 not supported.");
   2511 #endif
   2512 	}
   2513 
   2514 	/* Set the receive filter. */
   2515 	wm_set_filter(sc);
   2516 
   2517 	/* Start the one second link check clock. */
   2518 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2519 
   2520 	/* ...all done! */
   2521 	ifp->if_flags |= IFF_RUNNING;
   2522 	ifp->if_flags &= ~IFF_OACTIVE;
   2523 
   2524  out:
   2525 	if (error)
   2526 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
   2527 	return (error);
   2528 }
   2529 
   2530 /*
   2531  * wm_rxdrain:
   2532  *
   2533  *	Drain the receive queue.
   2534  */
   2535 static void
   2536 wm_rxdrain(struct wm_softc *sc)
   2537 {
   2538 	struct wm_rxsoft *rxs;
   2539 	int i;
   2540 
   2541 	for (i = 0; i < WM_NRXDESC; i++) {
   2542 		rxs = &sc->sc_rxsoft[i];
   2543 		if (rxs->rxs_mbuf != NULL) {
   2544 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2545 			m_freem(rxs->rxs_mbuf);
   2546 			rxs->rxs_mbuf = NULL;
   2547 		}
   2548 	}
   2549 }
   2550 
   2551 /*
   2552  * wm_stop:		[ifnet interface function]
   2553  *
   2554  *	Stop transmission on the interface.
   2555  */
   2556 static void
   2557 wm_stop(struct ifnet *ifp, int disable)
   2558 {
   2559 	struct wm_softc *sc = ifp->if_softc;
   2560 	struct wm_txsoft *txs;
   2561 	int i;
   2562 
   2563 	/* Stop the one second clock. */
   2564 	callout_stop(&sc->sc_tick_ch);
   2565 
   2566 	if (sc->sc_flags & WM_F_HAS_MII) {
   2567 		/* Down the MII. */
   2568 		mii_down(&sc->sc_mii);
   2569 	}
   2570 
   2571 	/* Stop the transmit and receive processes. */
   2572 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2573 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2574 
   2575 	/* Release any queued transmit buffers. */
   2576 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2577 		txs = &sc->sc_txsoft[i];
   2578 		if (txs->txs_mbuf != NULL) {
   2579 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2580 			m_freem(txs->txs_mbuf);
   2581 			txs->txs_mbuf = NULL;
   2582 		}
   2583 	}
   2584 
   2585 	if (disable)
   2586 		wm_rxdrain(sc);
   2587 
   2588 	/* Mark the interface as down and cancel the watchdog timer. */
   2589 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2590 	ifp->if_timer = 0;
   2591 }
   2592 
   2593 /*
   2594  * wm_acquire_eeprom:
   2595  *
   2596  *	Perform the EEPROM handshake required on some chips.
   2597  */
   2598 static int
   2599 wm_acquire_eeprom(struct wm_softc *sc)
   2600 {
   2601 	uint32_t reg;
   2602 	int x;
   2603 
   2604 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
   2605 		reg = CSR_READ(sc, WMREG_EECD);
   2606 
   2607 		/* Request EEPROM access. */
   2608 		reg |= EECD_EE_REQ;
   2609 		CSR_WRITE(sc, WMREG_EECD, reg);
   2610 
    2611 		/* ...and wait for it to be granted. */
   2612 		for (x = 0; x < 100; x++) {
   2613 			reg = CSR_READ(sc, WMREG_EECD);
   2614 			if (reg & EECD_EE_GNT)
   2615 				break;
   2616 			delay(5);
   2617 		}
   2618 		if ((reg & EECD_EE_GNT) == 0) {
   2619 			aprint_error("%s: could not acquire EEPROM GNT\n",
   2620 			    sc->sc_dev.dv_xname);
   2621 			reg &= ~EECD_EE_REQ;
   2622 			CSR_WRITE(sc, WMREG_EECD, reg);
   2623 			return (1);
   2624 		}
   2625 	}
   2626 
   2627 	return (0);
   2628 }
   2629 
   2630 /*
   2631  * wm_release_eeprom:
   2632  *
   2633  *	Release the EEPROM mutex.
   2634  */
   2635 static void
   2636 wm_release_eeprom(struct wm_softc *sc)
   2637 {
   2638 	uint32_t reg;
   2639 
   2640 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2641 		reg = CSR_READ(sc, WMREG_EECD);
   2642 		reg &= ~EECD_EE_REQ;
   2643 		CSR_WRITE(sc, WMREG_EECD, reg);
   2644 	}
   2645 }
   2646 
   2647 /*
   2648  * wm_eeprom_sendbits:
   2649  *
   2650  *	Send a series of bits to the EEPROM.
   2651  */
   2652 static void
   2653 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   2654 {
   2655 	uint32_t reg;
   2656 	int x;
   2657 
   2658 	reg = CSR_READ(sc, WMREG_EECD);
   2659 
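         	/*
         	 * Present each bit MSB-first on EECD_DI; the serial EEPROM
         	 * samples DI on the rising edge of EECD_SK.
         	 */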
   2660 	for (x = nbits; x > 0; x--) {
   2661 		if (bits & (1U << (x - 1)))
   2662 			reg |= EECD_DI;
   2663 		else
   2664 			reg &= ~EECD_DI;
   2665 		CSR_WRITE(sc, WMREG_EECD, reg);
   2666 		delay(2);
   2667 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2668 		delay(2);
   2669 		CSR_WRITE(sc, WMREG_EECD, reg);
   2670 		delay(2);
   2671 	}
   2672 }
   2673 
   2674 /*
   2675  * wm_eeprom_recvbits:
   2676  *
   2677  *	Receive a series of bits from the EEPROM.
   2678  */
   2679 static void
   2680 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   2681 {
   2682 	uint32_t reg, val;
   2683 	int x;
   2684 
   2685 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   2686 
   2687 	val = 0;
   2688 	for (x = nbits; x > 0; x--) {
   2689 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2690 		delay(2);
   2691 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   2692 			val |= (1U << (x - 1));
   2693 		CSR_WRITE(sc, WMREG_EECD, reg);
   2694 		delay(2);
   2695 	}
   2696 	*valp = val;
   2697 }
   2698 
   2699 /*
   2700  * wm_read_eeprom_uwire:
   2701  *
   2702  *	Read a word from the EEPROM using the MicroWire protocol.
   2703  */
   2704 static int
   2705 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2706 {
   2707 	uint32_t reg, val;
   2708 	int i;
   2709 
   2710 	for (i = 0; i < wordcnt; i++) {
   2711 		/* Clear SK and DI. */
   2712 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   2713 		CSR_WRITE(sc, WMREG_EECD, reg);
   2714 
   2715 		/* Set CHIP SELECT. */
   2716 		reg |= EECD_CS;
   2717 		CSR_WRITE(sc, WMREG_EECD, reg);
   2718 		delay(2);
   2719 
   2720 		/* Shift in the READ command. */
   2721 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   2722 
   2723 		/* Shift in address. */
   2724 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   2725 
   2726 		/* Shift out the data. */
   2727 		wm_eeprom_recvbits(sc, &val, 16);
   2728 		data[i] = val & 0xffff;
   2729 
   2730 		/* Clear CHIP SELECT. */
   2731 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   2732 		CSR_WRITE(sc, WMREG_EECD, reg);
   2733 		delay(2);
   2734 	}
   2735 
   2736 	return (0);
   2737 }
   2738 
   2739 /*
   2740  * wm_spi_eeprom_ready:
   2741  *
   2742  *	Wait for a SPI EEPROM to be ready for commands.
   2743  */
   2744 static int
   2745 wm_spi_eeprom_ready(struct wm_softc *sc)
   2746 {
   2747 	uint32_t val;
   2748 	int usec;
   2749 
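         	/*
         	 * Poll the status register (RDSR opcode) every 5 usec
         	 * until the part reports ready.
         	 */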
   2750 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   2751 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   2752 		wm_eeprom_recvbits(sc, &val, 8);
   2753 		if ((val & SPI_SR_RDY) == 0)
   2754 			break;
   2755 	}
   2756 	if (usec >= SPI_MAX_RETRIES) {
   2757 		aprint_error("%s: EEPROM failed to become ready\n",
   2758 		    sc->sc_dev.dv_xname);
   2759 		return (1);
   2760 	}
   2761 	return (0);
   2762 }
   2763 
   2764 /*
   2765  * wm_read_eeprom_spi:
   2766  *
    2767  *	Read a word from the EEPROM using the SPI protocol.
   2768  */
   2769 static int
   2770 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2771 {
   2772 	uint32_t reg, val;
   2773 	int i;
   2774 	uint8_t opc;
   2775 
   2776 	/* Clear SK and CS. */
   2777 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   2778 	CSR_WRITE(sc, WMREG_EECD, reg);
   2779 	delay(2);
   2780 
   2781 	if (wm_spi_eeprom_ready(sc))
   2782 		return (1);
   2783 
   2784 	/* Toggle CS to flush commands. */
   2785 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   2786 	delay(2);
   2787 	CSR_WRITE(sc, WMREG_EECD, reg);
   2788 	delay(2);
   2789 
   2790 	opc = SPI_OPC_READ;
   2791 	if (sc->sc_ee_addrbits == 8 && word >= 128)
   2792 		opc |= SPI_OPC_A8;
   2793 
   2794 	wm_eeprom_sendbits(sc, opc, 8);
   2795 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
   2796 
   2797 	for (i = 0; i < wordcnt; i++) {
   2798 		wm_eeprom_recvbits(sc, &val, 16);
   2799 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   2800 	}
   2801 
   2802 	/* Raise CS and clear SK. */
   2803 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   2804 	CSR_WRITE(sc, WMREG_EECD, reg);
   2805 	delay(2);
   2806 
   2807 	return (0);
   2808 }
   2809 
   2810 /*
   2811  * wm_read_eeprom:
   2812  *
   2813  *	Read data from the serial EEPROM.
   2814  */
   2815 static int
   2816 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2817 {
   2818 	int rv;
   2819 
   2820 	if (wm_acquire_eeprom(sc))
   2821 		return (1);
   2822 
   2823 	if (sc->sc_flags & WM_F_EEPROM_SPI)
   2824 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   2825 	else
   2826 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   2827 
   2828 	wm_release_eeprom(sc);
   2829 	return (rv);
   2830 }
   2831 
   2832 /*
   2833  * wm_add_rxbuf:
   2834  *
    2835  *	Add a receive buffer to the indicated descriptor.
   2836  */
   2837 static int
   2838 wm_add_rxbuf(struct wm_softc *sc, int idx)
   2839 {
   2840 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   2841 	struct mbuf *m;
   2842 	int error;
   2843 
   2844 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   2845 	if (m == NULL)
   2846 		return (ENOBUFS);
   2847 
   2848 	MCLGET(m, M_DONTWAIT);
   2849 	if ((m->m_flags & M_EXT) == 0) {
   2850 		m_freem(m);
   2851 		return (ENOBUFS);
   2852 	}
   2853 
   2854 	if (rxs->rxs_mbuf != NULL)
   2855 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2856 
   2857 	rxs->rxs_mbuf = m;
   2858 
   2859 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   2860 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   2861 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   2862 	if (error) {
   2863 		printf("%s: unable to load rx DMA map %d, error = %d\n",
   2864 		    sc->sc_dev.dv_xname, idx, error);
   2865 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
   2866 	}
   2867 
   2868 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   2869 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   2870 
   2871 	WM_INIT_RXDESC(sc, idx);
   2872 
   2873 	return (0);
   2874 }
   2875 
   2876 /*
   2877  * wm_set_ral:
   2878  *
    2879  *	Set an entry in the receive address list.
   2880  */
   2881 static void
   2882 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2883 {
   2884 	uint32_t ral_lo, ral_hi;
   2885 
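         	/*
         	 * The address is packed little-endian: the low register
         	 * holds bytes 0-3, the high register holds bytes 4-5 plus
         	 * the Address Valid bit.
         	 */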
   2886 	if (enaddr != NULL) {
   2887 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2888 		    (enaddr[3] << 24);
   2889 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2890 		ral_hi |= RAL_AV;
   2891 	} else {
   2892 		ral_lo = 0;
   2893 		ral_hi = 0;
   2894 	}
   2895 
   2896 	if (sc->sc_type >= WM_T_82544) {
   2897 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2898 		    ral_lo);
   2899 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2900 		    ral_hi);
   2901 	} else {
   2902 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2903 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2904 	}
   2905 }
   2906 
   2907 /*
   2908  * wm_mchash:
   2909  *
   2910  *	Compute the hash of the multicast address for the 4096-bit
   2911  *	multicast filter.
   2912  */
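         /*
          * Worked example, filter type 0: for 01:00:5e:00:00:01 we have
          * enaddr[4] = 0x00 and enaddr[5] = 0x01, so hash = (0x00 >> 4) |
          * (0x01 << 4) = 0x010, which wm_set_filter() maps to bit 16 of
          * MTA word 0.
          */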
   2913 static uint32_t
   2914 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2915 {
   2916 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2917 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2918 	uint32_t hash;
   2919 
   2920 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2921 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2922 
   2923 	return (hash & 0xfff);
   2924 }
   2925 
   2926 /*
   2927  * wm_set_filter:
   2928  *
   2929  *	Set up the receive filter.
   2930  */
   2931 static void
   2932 wm_set_filter(struct wm_softc *sc)
   2933 {
   2934 	struct ethercom *ec = &sc->sc_ethercom;
   2935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2936 	struct ether_multi *enm;
   2937 	struct ether_multistep step;
   2938 	bus_addr_t mta_reg;
   2939 	uint32_t hash, reg, bit;
   2940 	int i;
   2941 
   2942 	if (sc->sc_type >= WM_T_82544)
   2943 		mta_reg = WMREG_CORDOVA_MTA;
   2944 	else
   2945 		mta_reg = WMREG_MTA;
   2946 
   2947 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2948 
   2949 	if (ifp->if_flags & IFF_BROADCAST)
   2950 		sc->sc_rctl |= RCTL_BAM;
   2951 	if (ifp->if_flags & IFF_PROMISC) {
   2952 		sc->sc_rctl |= RCTL_UPE;
   2953 		goto allmulti;
   2954 	}
   2955 
   2956 	/*
   2957 	 * Set the station address in the first RAL slot, and
   2958 	 * clear the remaining slots.
   2959 	 */
   2960 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
   2961 	for (i = 1; i < WM_RAL_TABSIZE; i++)
   2962 		wm_set_ral(sc, NULL, i);
   2963 
   2964 	/* Clear out the multicast table. */
   2965 	for (i = 0; i < WM_MC_TABSIZE; i++)
   2966 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2967 
   2968 	ETHER_FIRST_MULTI(step, ec, enm);
   2969 	while (enm != NULL) {
   2970 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2971 			/*
   2972 			 * We must listen to a range of multicast addresses.
   2973 			 * For now, just accept all multicasts, rather than
   2974 			 * trying to set only those filter bits needed to match
   2975 			 * the range.  (At this time, the only use of address
   2976 			 * ranges is for IP multicast routing, for which the
   2977 			 * range is big enough to require all bits set.)
   2978 			 */
   2979 			goto allmulti;
   2980 		}
   2981 
   2982 		hash = wm_mchash(sc, enm->enm_addrlo);
   2983 
   2984 		reg = (hash >> 5) & 0x7f;
   2985 		bit = hash & 0x1f;
   2986 
   2987 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2988 		hash |= 1U << bit;
   2989 
   2990 		/* XXX Hardware bug?? */
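         		/*
         		 * Writing MTA[reg] on the 82544 appears to disturb the
         		 * adjacent entry, so save MTA[reg - 1] and rewrite it
         		 * after the update.
         		 */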
   2991 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   2992 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2993 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2994 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2995 		} else
   2996 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2997 
   2998 		ETHER_NEXT_MULTI(step, enm);
   2999 	}
   3000 
   3001 	ifp->if_flags &= ~IFF_ALLMULTI;
   3002 	goto setit;
   3003 
   3004  allmulti:
   3005 	ifp->if_flags |= IFF_ALLMULTI;
   3006 	sc->sc_rctl |= RCTL_MPE;
   3007 
   3008  setit:
   3009 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3010 }
   3011 
   3012 /*
   3013  * wm_tbi_mediainit:
   3014  *
   3015  *	Initialize media for use on 1000BASE-X devices.
   3016  */
   3017 static void
   3018 wm_tbi_mediainit(struct wm_softc *sc)
   3019 {
   3020 	const char *sep = "";
   3021 
   3022 	if (sc->sc_type < WM_T_82543)
   3023 		sc->sc_tipg = TIPG_WM_DFLT;
   3024 	else
   3025 		sc->sc_tipg = TIPG_LG_DFLT;
   3026 
   3027 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   3028 	    wm_tbi_mediastatus);
   3029 
   3030 	/*
   3031 	 * SWD Pins:
   3032 	 *
   3033 	 *	0 = Link LED (output)
   3034 	 *	1 = Loss Of Signal (input)
   3035 	 */
   3036 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   3037 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   3038 
   3039 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3040 
   3041 #define	ADD(ss, mm, dd)							\
   3042 do {									\
   3043 	printf("%s%s", sep, ss);					\
   3044 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   3045 	sep = ", ";							\
   3046 } while (/*CONSTCOND*/0)
   3047 
   3048 	printf("%s: ", sc->sc_dev.dv_xname);
   3049 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   3050 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   3051 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   3052 	printf("\n");
   3053 
   3054 #undef ADD
   3055 
   3056 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
   3057 }
   3058 
   3059 /*
   3060  * wm_tbi_mediastatus:	[ifmedia interface function]
   3061  *
   3062  *	Get the current interface media status on a 1000BASE-X device.
   3063  */
   3064 static void
   3065 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3066 {
   3067 	struct wm_softc *sc = ifp->if_softc;
   3068 
   3069 	ifmr->ifm_status = IFM_AVALID;
   3070 	ifmr->ifm_active = IFM_ETHER;
   3071 
   3072 	if (sc->sc_tbi_linkup == 0) {
   3073 		ifmr->ifm_active |= IFM_NONE;
   3074 		return;
   3075 	}
   3076 
   3077 	ifmr->ifm_status |= IFM_ACTIVE;
   3078 	ifmr->ifm_active |= IFM_1000_SX;
   3079 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   3080 		ifmr->ifm_active |= IFM_FDX;
   3081 }
   3082 
   3083 /*
   3084  * wm_tbi_mediachange:	[ifmedia interface function]
   3085  *
   3086  *	Set hardware to newly-selected media on a 1000BASE-X device.
   3087  */
   3088 static int
   3089 wm_tbi_mediachange(struct ifnet *ifp)
   3090 {
   3091 	struct wm_softc *sc = ifp->if_softc;
   3092 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   3093 	uint32_t status;
   3094 	int i;
   3095 
   3096 	sc->sc_txcw = ife->ifm_data;
   3097 	if (sc->sc_ctrl & CTRL_RFCE)
   3098 		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
   3099 	if (sc->sc_ctrl & CTRL_TFCE)
   3100 		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
   3101 	sc->sc_txcw |= TXCW_ANE;
   3102 
   3103 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   3104 	delay(10000);
   3105 
   3106 	sc->sc_tbi_anstate = 0;
   3107 
   3108 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
   3109 		/* Have signal; wait for the link to come up. */
   3110 		for (i = 0; i < 50; i++) {
   3111 			delay(10000);
   3112 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   3113 				break;
   3114 		}
   3115 
   3116 		status = CSR_READ(sc, WMREG_STATUS);
   3117 		if (status & STATUS_LU) {
   3118 			/* Link is up. */
   3119 			DPRINTF(WM_DEBUG_LINK,
   3120 			    ("%s: LINK: set media -> link up %s\n",
   3121 			    sc->sc_dev.dv_xname,
   3122 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   3123 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3124 			if (status & STATUS_FD)
   3125 				sc->sc_tctl |=
   3126 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3127 			else
   3128 				sc->sc_tctl |=
   3129 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3130 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3131 			sc->sc_tbi_linkup = 1;
   3132 		} else {
   3133 			/* Link is down. */
   3134 			DPRINTF(WM_DEBUG_LINK,
   3135 			    ("%s: LINK: set media -> link down\n",
   3136 			    sc->sc_dev.dv_xname));
   3137 			sc->sc_tbi_linkup = 0;
   3138 		}
   3139 	} else {
   3140 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   3141 		    sc->sc_dev.dv_xname));
   3142 		sc->sc_tbi_linkup = 0;
   3143 	}
   3144 
   3145 	wm_tbi_set_linkled(sc);
   3146 
   3147 	return (0);
   3148 }
   3149 
   3150 /*
   3151  * wm_tbi_set_linkled:
   3152  *
   3153  *	Update the link LED on 1000BASE-X devices.
   3154  */
   3155 static void
   3156 wm_tbi_set_linkled(struct wm_softc *sc)
   3157 {
   3158 
   3159 	if (sc->sc_tbi_linkup)
   3160 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   3161 	else
   3162 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   3163 
   3164 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3165 }
   3166 
   3167 /*
   3168  * wm_tbi_check_link:
   3169  *
   3170  *	Check the link on 1000BASE-X devices.
   3171  */
   3172 static void
   3173 wm_tbi_check_link(struct wm_softc *sc)
   3174 {
   3175 	uint32_t rxcw, ctrl, status;
   3176 
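         	/*
         	 * sc_tbi_anstate is a countdown to a pending link check:
         	 * wm_linkintr() sets it to 2, and it is decremented once
         	 * per tick; the check below runs on the tick it reaches 1.
         	 * Zero means no check is pending.
         	 */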
   3177 	if (sc->sc_tbi_anstate == 0)
   3178 		return;
   3179 	else if (sc->sc_tbi_anstate > 1) {
   3180 		DPRINTF(WM_DEBUG_LINK,
   3181 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
   3182 		    sc->sc_tbi_anstate));
   3183 		sc->sc_tbi_anstate--;
   3184 		return;
   3185 	}
   3186 
   3187 	sc->sc_tbi_anstate = 0;
   3188 
   3189 	rxcw = CSR_READ(sc, WMREG_RXCW);
   3190 	ctrl = CSR_READ(sc, WMREG_CTRL);
   3191 	status = CSR_READ(sc, WMREG_STATUS);
   3192 
   3193 	if ((status & STATUS_LU) == 0) {
   3194 		DPRINTF(WM_DEBUG_LINK,
   3195 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
   3196 		sc->sc_tbi_linkup = 0;
   3197 	} else {
   3198 		DPRINTF(WM_DEBUG_LINK,
   3199 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
   3200 		    (status & STATUS_FD) ? "FDX" : "HDX"));
   3201 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3202 		if (status & STATUS_FD)
   3203 			sc->sc_tctl |=
   3204 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3205 		else
   3206 			sc->sc_tctl |=
   3207 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3208 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3209 		sc->sc_tbi_linkup = 1;
   3210 	}
   3211 
   3212 	wm_tbi_set_linkled(sc);
   3213 }
   3214 
   3215 /*
   3216  * wm_gmii_reset:
   3217  *
   3218  *	Reset the PHY.
   3219  */
   3220 static void
   3221 wm_gmii_reset(struct wm_softc *sc)
   3222 {
   3223 	uint32_t reg;
   3224 
   3225 	if (sc->sc_type >= WM_T_82544) {
   3226 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   3227 		delay(20000);
   3228 
   3229 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3230 		delay(20000);
   3231 	} else {
   3232 		/* The PHY reset pin is active-low. */
   3233 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3234 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   3235 		    CTRL_EXT_SWDPIN(4));
   3236 		reg |= CTRL_EXT_SWDPIO(4);
   3237 
   3238 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   3239 		delay(10);
   3240 
   3241 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3242 		delay(10);
   3243 
   3244 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   3245 		delay(10);
   3246 #if 0
   3247 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   3248 #endif
   3249 	}
   3250 }
   3251 
   3252 /*
   3253  * wm_gmii_mediainit:
   3254  *
   3255  *	Initialize media for use on 1000BASE-T devices.
   3256  */
   3257 static void
   3258 wm_gmii_mediainit(struct wm_softc *sc)
   3259 {
   3260 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3261 
   3262 	/* We have MII. */
   3263 	sc->sc_flags |= WM_F_HAS_MII;
   3264 
   3265 	sc->sc_tipg = TIPG_1000T_DFLT;
   3266 
   3267 	/*
   3268 	 * Let the chip set speed/duplex on its own based on
   3269 	 * signals from the PHY.
   3270 	 */
   3271 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
   3272 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3273 
   3274 	/* Initialize our media structures and probe the GMII. */
   3275 	sc->sc_mii.mii_ifp = ifp;
   3276 
   3277 	if (sc->sc_type >= WM_T_82544) {
   3278 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
   3279 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
   3280 	} else {
   3281 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
   3282 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
   3283 	}
   3284 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
   3285 
   3286 	wm_gmii_reset(sc);
   3287 
   3288 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
   3289 	    wm_gmii_mediastatus);
   3290 
   3291 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   3292 	    MII_OFFSET_ANY, 0);
   3293 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
   3294 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   3295 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
   3296 	} else
   3297 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
   3298 }
   3299 
   3300 /*
   3301  * wm_gmii_mediastatus:	[ifmedia interface function]
   3302  *
   3303  *	Get the current interface media status on a 1000BASE-T device.
   3304  */
   3305 static void
   3306 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3307 {
   3308 	struct wm_softc *sc = ifp->if_softc;
   3309 
   3310 	mii_pollstat(&sc->sc_mii);
   3311 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   3312 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   3313 }
   3314 
   3315 /*
   3316  * wm_gmii_mediachange:	[ifmedia interface function]
   3317  *
   3318  *	Set hardware to newly-selected media on a 1000BASE-T device.
   3319  */
   3320 static int
   3321 wm_gmii_mediachange(struct ifnet *ifp)
   3322 {
   3323 	struct wm_softc *sc = ifp->if_softc;
   3324 
   3325 	if (ifp->if_flags & IFF_UP)
   3326 		mii_mediachg(&sc->sc_mii);
   3327 	return (0);
   3328 }
   3329 
   3330 #define	MDI_IO		CTRL_SWDPIN(2)
   3331 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   3332 #define	MDI_CLK		CTRL_SWDPIN(3)
   3333 
   3334 static void
   3335 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   3336 {
   3337 	uint32_t i, v;
   3338 
   3339 	v = CSR_READ(sc, WMREG_CTRL);
   3340 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   3341 	v |= MDI_DIR | CTRL_SWDPIO(3);
   3342 
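         	/* Shift the bits out MSB first: set the data pin, pulse MDI_CLK. */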
    3343 	for (i = 1U << (nbits - 1); i != 0; i >>= 1) {
   3344 		if (data & i)
   3345 			v |= MDI_IO;
   3346 		else
   3347 			v &= ~MDI_IO;
   3348 		CSR_WRITE(sc, WMREG_CTRL, v);
   3349 		delay(10);
   3350 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   3351 		delay(10);
   3352 		CSR_WRITE(sc, WMREG_CTRL, v);
   3353 		delay(10);
   3354 	}
   3355 }
   3356 
   3357 static uint32_t
   3358 i82543_mii_recvbits(struct wm_softc *sc)
   3359 {
   3360 	uint32_t v, i, data = 0;
   3361 
   3362 	v = CSR_READ(sc, WMREG_CTRL);
   3363 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   3364 	v |= CTRL_SWDPIO(3);
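         	/* Only the clock pin is driven; the data pin is now an input. */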
   3365 
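         	/* One idle clock for the turnaround before the data bits arrive. */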
   3366 	CSR_WRITE(sc, WMREG_CTRL, v);
   3367 	delay(10);
   3368 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   3369 	delay(10);
   3370 	CSR_WRITE(sc, WMREG_CTRL, v);
   3371 	delay(10);
   3372 
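         	/* Clock in 16 data bits, sampling MDI_IO while the clock is high. */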
   3373 	for (i = 0; i < 16; i++) {
   3374 		data <<= 1;
   3375 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   3376 		delay(10);
   3377 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   3378 			data |= 1;
   3379 		CSR_WRITE(sc, WMREG_CTRL, v);
   3380 		delay(10);
   3381 	}
   3382 
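         	/* A final idle clock to finish the frame. */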
   3383 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   3384 	delay(10);
   3385 	CSR_WRITE(sc, WMREG_CTRL, v);
   3386 	delay(10);
   3387 
   3388 	return (data);
   3389 }
   3390 
   3391 #undef MDI_IO
   3392 #undef MDI_DIR
   3393 #undef MDI_CLK
   3394 
   3395 /*
   3396  * wm_gmii_i82543_readreg:	[mii interface function]
   3397  *
   3398  *	Read a PHY register on the GMII (i82543 version).
   3399  */
   3400 static int
   3401 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
   3402 {
   3403 	struct wm_softc *sc = (void *) self;
   3404 	int rv;
   3405 
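         	/* Send the 32-bit preamble, then ST, OP(read), PHY address, reg. */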
   3406 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   3407 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   3408 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   3409 	rv = i82543_mii_recvbits(sc) & 0xffff;
   3410 
   3411 	DPRINTF(WM_DEBUG_GMII,
   3412 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   3413 	    sc->sc_dev.dv_xname, phy, reg, rv));
   3414 
   3415 	return (rv);
   3416 }
   3417 
   3418 /*
   3419  * wm_gmii_i82543_writereg:	[mii interface function]
   3420  *
   3421  *	Write a PHY register on the GMII (i82543 version).
   3422  */
   3423 static void
   3424 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
   3425 {
   3426 	struct wm_softc *sc = (void *) self;
   3427 
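         	/*
         	 * Send the preamble, then the whole 32-bit write frame:
         	 * ST, OP(write), PHY address, register, turnaround, 16 data bits.
         	 */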
   3428 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   3429 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   3430 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   3431 	    (MII_COMMAND_START << 30), 32);
   3432 }
   3433 
   3434 /*
   3435  * wm_gmii_i82544_readreg:	[mii interface function]
   3436  *
   3437  *	Read a PHY register on the GMII.
   3438  */
   3439 static int
   3440 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
   3441 {
   3442 	struct wm_softc *sc = (void *) self;
   3443 	uint32_t mdic = 0;
   3444 	int i, rv;
   3445 
   3446 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   3447 	    MDIC_REGADD(reg));
   3448 
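         	/* Poll for completion; allow up to ~1ms (100 * 10us). */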
   3449 	for (i = 0; i < 100; i++) {
   3450 		mdic = CSR_READ(sc, WMREG_MDIC);
   3451 		if (mdic & MDIC_READY)
   3452 			break;
   3453 		delay(10);
   3454 	}
   3455 
   3456 	if ((mdic & MDIC_READY) == 0) {
   3457 		printf("%s: MDIC read timed out: phy %d reg %d\n",
   3458 		    sc->sc_dev.dv_xname, phy, reg);
   3459 		rv = 0;
   3460 	} else if (mdic & MDIC_E) {
   3461 #if 0 /* This is normal if no PHY is present. */
   3462 		printf("%s: MDIC read error: phy %d reg %d\n",
   3463 		    sc->sc_dev.dv_xname, phy, reg);
   3464 #endif
   3465 		rv = 0;
   3466 	} else {
   3467 		rv = MDIC_DATA(mdic);
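         		/* All-ones usually means no PHY answered at this address. */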
   3468 		if (rv == 0xffff)
   3469 			rv = 0;
   3470 	}
   3471 
   3472 	return (rv);
   3473 }
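         /*
          * Example (sketch): the MII layer calls the routine above through
          * sc_mii.mii_readreg; a direct call to fetch the PHY identification
          * registers (MII_PHYIDR1/MII_PHYIDR2 from <dev/mii/mii.h>) would
          * look like:
          *
          *	id1 = wm_gmii_i82544_readreg(&sc->sc_dev, phy, MII_PHYIDR1);
          *	id2 = wm_gmii_i82544_readreg(&sc->sc_dev, phy, MII_PHYIDR2);
          */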
   3474 
   3475 /*
   3476  * wm_gmii_i82544_writereg:	[mii interface function]
   3477  *
   3478  *	Write a PHY register on the GMII.
   3479  */
   3480 static void
   3481 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
   3482 {
   3483 	struct wm_softc *sc = (void *) self;
   3484 	uint32_t mdic = 0;
   3485 	int i;
   3486 
   3487 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   3488 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   3489 
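         	/* Poll for completion, as in the read path (~1ms worst case). */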
   3490 	for (i = 0; i < 100; i++) {
   3491 		mdic = CSR_READ(sc, WMREG_MDIC);
   3492 		if (mdic & MDIC_READY)
   3493 			break;
   3494 		delay(10);
   3495 	}
   3496 
   3497 	if ((mdic & MDIC_READY) == 0)
   3498 		printf("%s: MDIC write timed out: phy %d reg %d\n",
   3499 		    sc->sc_dev.dv_xname, phy, reg);
   3500 	else if (mdic & MDIC_E)
   3501 		printf("%s: MDIC write error: phy %d reg %d\n",
   3502 		    sc->sc_dev.dv_xname, phy, reg);
   3503 }
   3504 
   3505 /*
   3506  * wm_gmii_statchg:	[mii interface function]
   3507  *
   3508  *	Callback from MII layer when media changes.
   3509  */
   3510 static void
   3511 wm_gmii_statchg(struct device *self)
   3512 {
   3513 	struct wm_softc *sc = (void *) self;
   3514 
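         	/*
         	 * The transmit collision distance depends on the duplex mode:
         	 * clear the old value, then set it for the new mode below.
         	 */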
   3515 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3516 
   3517 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   3518 		DPRINTF(WM_DEBUG_LINK,
   3519 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
   3520 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
    3521 	} else {
   3522 		DPRINTF(WM_DEBUG_LINK,
   3523 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
   3524 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3525 	}
   3526 
   3527 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3528 }
   3529