/*	$NetBSD: if_wm.c,v 1.64 2003/12/04 06:57:37 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.64 2003/12/04 06:57:37 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 40 DMA segments per
 * packet (there have been reports of jumbo frame packets with as
 * many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

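/*
 * Illustrative sketch (not compiled into the driver): because the ring
 * sizes above are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap an index
 * with a single AND instead of a modulus, e.g. with WM_NTXDESC == 256:
 *
 *	WM_NEXTTX(254) == 255
 *	WM_NEXTTX(255) == ((255 + 1) & 0xff) == 0
 */
#if 0
static __inline int
wm_example_lastdesc(int first, int nsegs)	/* hypothetical helper */
{
	int i, last = first;

	for (i = 1; i < nsegs; i++)
		last = WM_NEXTTX(last);	/* wraps 255 -> 0 for free */
	return (last);	/* ring index of the packet's final descriptor */
}
#endif
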
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

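/*
 * Illustrative sketch: WM_CDTXOFF()/WM_CDRXOFF() turn a ring index into
 * a byte offset within the single control-data allocation, which is the
 * unit bus_dmamap_sync() works in.  E.g. syncing one Tx descriptor by
 * hand (the WM_CDTXSYNC() macro below wraps this up):
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    WM_CDTXOFF(5), sizeof(wiseman_txdesc_t), BUS_DMASYNC_PREWRITE);
#endif
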
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

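/*
 * Illustrative sketch: the Rx chain macros above keep sc_rxtailp aimed
 * at the chain's terminating next-pointer, so appending an mbuf is O(1)
 * with no list walk.  Open-coded, WM_RXCHAIN_LINK() is roughly:
 */
#if 0
	*sc->sc_rxtailp = m;		/* patch m into the chain's tail */
	sc->sc_rxtail = m;		/* remember the new tail mbuf */
	sc->sc_rxtailp = &m->m_next;	/* next link goes into m->m_next */
#endif
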
/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

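/*
 * Illustrative sketch: WM_CDTXSYNC() issues two bus_dmamap_sync() calls
 * when the descriptor range crosses the end of the ring.  For example,
 * the (hypothetical) call below, covering descriptors 254, 255, 0 and 1,
 * becomes one sync of 254..255 followed by one sync of 0..1:
 */
#if 0
	WM_CDTXSYNC(sc, 254, 4, BUS_DMASYNC_PREWRITE);
#endif
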
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

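/*
 * Illustrative sketch of the alignment arithmetic: with sc_align_tweak
 * set to 2, the 14-byte Ethernet header starts 2 bytes into the 4-byte
 * aligned cluster, so the IP header lands at offset 16, a multiple of 4:
 */
#if 0
	KASSERT(((2 + ETHER_HDR_LEN) & 3) == 0);	/* 2 + 14 == 16 */
#endif
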
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

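	/*
	 * Illustrative note on the MMRBC arithmetic above: the PCI-X
	 * byte count fields encode 512 << n bytes, so the field values
	 * 0..3 correspond to 512, 1024, 2048 and 4096 bytes, which is
	 * exactly what the aprint_verbose() message prints.
	 */
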
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

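	/*
	 * Illustrative note: the word count printed above is simply
	 * 1 << sc_ee_addrbits, since each EEPROM address names one
	 * 16-bit word; e.g. 6 address bits -> 64 words, 8 -> 256.
	 */
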
	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

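	/*
	 * Illustrative note: the EEPROM stores the MAC address as three
	 * little-endian 16-bit words, so (hypothetically) myea[] =
	 * { 0x1100, 0x3322, 0x5544 } unpacks via the shifts above to
	 * the address 00:11:22:33:44:55.
	 */
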
	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = htole32(WTX_CMD_DEXT | WTC_DTYP_D);
	*fieldsp = fields;

	return (0);
}

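/*
 * Illustrative sketch (hypothetical values, not driver code): for an
 * untagged IPv4/TCP frame with a 20-byte IP header, wm_tx_cksum() above
 * would build the context descriptor halves as follows:
 *
 *	IPCSS = 14	start of the IP header (ETHER_HDR_LEN)
 *	IPCSO = 24	14 + offsetof(struct ip, ip_sum)
 *	IPCSE = 33	14 + 20 - 1, last byte the IP checksum covers
 *	TUCSS = 34	start of the TCP header
 *	TUCSO = 50	34 + offsetof(struct tcphdr, th_sum)
 *	TUCSE = 0	checksum runs to the end of the packet
 */
#if 0
	uint32_t ipcs = htole32(WTX_TCPIP_IPCSS(14) |
	    WTX_TCPIP_IPCSO(24) | WTX_TCPIP_IPCSE(33));
	uint32_t tucs = htole32(WTX_TCPIP_TUCSS(34) |
	    WTX_TCPIP_TUCSO(50) | WTX_TCPIP_TUCSE(0));
#endif
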
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
   1533 		 * Initially, we consider the number of descriptors the
    1534 		 * packet uses to be the number of DMA segments.  This may be
   1535 		 * incremented by 1 if we do checksum offload (a descriptor
   1536 		 * is used to set the checksum context).
   1537 		 */
   1538 		txs->txs_mbuf = m0;
   1539 		txs->txs_firstdesc = sc->sc_txnext;
   1540 		txs->txs_ndesc = dmamap->dm_nsegs;
   1541 
   1542 		/*
   1543 		 * Set up checksum offload parameters for
   1544 		 * this packet.
   1545 		 */
   1546 		if (m0->m_pkthdr.csum_flags &
   1547 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1548 			if (wm_tx_cksum(sc, txs, &cksumcmd,
   1549 					&cksumfields) != 0) {
   1550 				/* Error message already displayed. */
   1551 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   1552 				continue;
   1553 			}
   1554 		} else {
   1555 			cksumcmd = 0;
   1556 			cksumfields = 0;
   1557 		}
   1558 
   1559 		cksumcmd |= htole32(WTX_CMD_IDE);
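         		/*
         		 * The WTX_CMD_IDE bit OR'd in above enables the chip's
         		 * transmit interrupt delay for this packet, batching Tx
         		 * completions according to the TIDV value programmed in
         		 * wm_init(); wm_watchdog() sweeps up before reporting
         		 * errors for exactly this reason.
         		 */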
   1560 
   1561 		/*
   1562 		 * Initialize the transmit descriptor.
   1563 		 */
   1564 		for (nexttx = sc->sc_txnext, seg = 0;
   1565 		     seg < dmamap->dm_nsegs;
   1566 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
   1567 			/*
   1568 			 * Note: we currently only use 32-bit DMA
   1569 			 * addresses.
   1570 			 */
   1571 			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
   1572 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
   1573 			    htole32(dmamap->dm_segs[seg].ds_addr);
   1574 			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
   1575 			    htole32(dmamap->dm_segs[seg].ds_len);
   1576 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
   1577 			    cksumfields;
   1578 			lasttx = nexttx;
   1579 
   1580 			DPRINTF(WM_DEBUG_TX,
   1581 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
   1582 			    sc->sc_dev.dv_xname, nexttx,
   1583 			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
   1584 			    (uint32_t) dmamap->dm_segs[seg].ds_len));
   1585 		}
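         		/*
         		 * For example, a two-segment mbuf chain consumes two
         		 * descriptors here, each carrying cksumcmd plus its own
         		 * segment length; only the last one (lasttx) has
         		 * EOP/IFCS/RS OR'd in below.
         		 */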
   1586 
   1587 		KASSERT(lasttx != -1);
   1588 
   1589 		/*
   1590 		 * Set up the command byte on the last descriptor of
   1591 		 * the packet.  If we're in the interrupt delay window,
   1592 		 * delay the interrupt.
   1593 		 */
   1594 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1595 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
   1596 
   1597 #if 0 /* XXXJRT */
   1598 		/*
   1599 		 * If VLANs are enabled and the packet has a VLAN tag, set
   1600 		 * up the descriptor to encapsulate the packet for us.
   1601 		 *
   1602 		 * This is only valid on the last descriptor of the packet.
   1603 		 */
   1604 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   1605 		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
   1606 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1607 			    htole32(WTX_CMD_VLE);
   1608 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
   1609 			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
   1610 		}
   1611 #endif /* XXXJRT */
   1612 
   1613 		txs->txs_lastdesc = lasttx;
   1614 
   1615 		DPRINTF(WM_DEBUG_TX,
   1616 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
   1617 		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
   1618 
   1619 		/* Sync the descriptors we're using. */
   1620 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
   1621 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1622 
   1623 		/* Give the packet to the chip. */
   1624 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   1625 
   1626 		DPRINTF(WM_DEBUG_TX,
   1627 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
   1628 
   1629 		DPRINTF(WM_DEBUG_TX,
   1630 		    ("%s: TX: finished transmitting packet, job %d\n",
   1631 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
   1632 
   1633 		/* Advance the tx pointer. */
   1634 		sc->sc_txfree -= txs->txs_ndesc;
   1635 		sc->sc_txnext = nexttx;
   1636 
   1637 		sc->sc_txsfree--;
   1638 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
   1639 
   1640 #if NBPFILTER > 0
   1641 		/* Pass the packet to any BPF listeners. */
   1642 		if (ifp->if_bpf)
   1643 			bpf_mtap(ifp->if_bpf, m0);
   1644 #endif /* NBPFILTER > 0 */
   1645 	}
   1646 
   1647 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   1648 		/* No more slots; notify upper layer. */
   1649 		ifp->if_flags |= IFF_OACTIVE;
   1650 	}
   1651 
   1652 	if (sc->sc_txfree != ofree) {
   1653 		/* Set a watchdog timer in case the chip flakes out. */
   1654 		ifp->if_timer = 5;
   1655 	}
   1656 }
   1657 
   1658 /*
   1659  * wm_watchdog:		[ifnet interface function]
   1660  *
   1661  *	Watchdog timer handler.
   1662  */
   1663 static void
   1664 wm_watchdog(struct ifnet *ifp)
   1665 {
   1666 	struct wm_softc *sc = ifp->if_softc;
   1667 
   1668 	/*
   1669 	 * Since we're using delayed interrupts, sweep up
   1670 	 * before we report an error.
   1671 	 */
   1672 	wm_txintr(sc);
   1673 
   1674 	if (sc->sc_txfree != WM_NTXDESC) {
   1675 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1676 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1677 		    sc->sc_txnext);
   1678 		ifp->if_oerrors++;
   1679 
   1680 		/* Reset the interface. */
   1681 		(void) wm_init(ifp);
   1682 	}
   1683 
   1684 	/* Try to get more packets going. */
   1685 	wm_start(ifp);
   1686 }
   1687 
   1688 /*
   1689  * wm_ioctl:		[ifnet interface function]
   1690  *
   1691  *	Handle control requests from the operator.
   1692  */
   1693 static int
   1694 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1695 {
   1696 	struct wm_softc *sc = ifp->if_softc;
   1697 	struct ifreq *ifr = (struct ifreq *) data;
   1698 	int s, error;
   1699 
   1700 	s = splnet();
   1701 
   1702 	switch (cmd) {
   1703 	case SIOCSIFMEDIA:
   1704 	case SIOCGIFMEDIA:
   1705 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1706 		break;
   1707 	default:
   1708 		error = ether_ioctl(ifp, cmd, data);
   1709 		if (error == ENETRESET) {
   1710 			/*
   1711 			 * Multicast list has changed; set the hardware filter
   1712 			 * accordingly.
   1713 			 */
   1714 			wm_set_filter(sc);
   1715 			error = 0;
   1716 		}
   1717 		break;
   1718 	}
   1719 
   1720 	/* Try to get more packets going. */
   1721 	wm_start(ifp);
   1722 
   1723 	splx(s);
   1724 	return (error);
   1725 }
   1726 
   1727 /*
   1728  * wm_intr:
   1729  *
   1730  *	Interrupt service routine.
   1731  */
   1732 static int
   1733 wm_intr(void *arg)
   1734 {
   1735 	struct wm_softc *sc = arg;
   1736 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1737 	uint32_t icr;
   1738 	int wantinit, handled = 0;
   1739 
   1740 	for (wantinit = 0; wantinit == 0;) {
   1741 		icr = CSR_READ(sc, WMREG_ICR);
   1742 		if ((icr & sc->sc_icr) == 0)
   1743 			break;
   1744 
    1745 #if 0 /* NRND > 0 */
   1746 		if (RND_ENABLED(&sc->rnd_source))
   1747 			rnd_add_uint32(&sc->rnd_source, icr);
   1748 #endif
   1749 
   1750 		handled = 1;
   1751 
   1752 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1753 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   1754 			DPRINTF(WM_DEBUG_RX,
   1755 			    ("%s: RX: got Rx intr 0x%08x\n",
   1756 			    sc->sc_dev.dv_xname,
   1757 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   1758 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   1759 		}
   1760 #endif
   1761 		wm_rxintr(sc);
   1762 
   1763 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1764 		if (icr & ICR_TXDW) {
   1765 			DPRINTF(WM_DEBUG_TX,
    1766 			    ("%s: TX: got TXDW interrupt\n",
   1767 			    sc->sc_dev.dv_xname));
   1768 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   1769 		}
   1770 #endif
   1771 		wm_txintr(sc);
   1772 
   1773 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   1774 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   1775 			wm_linkintr(sc, icr);
   1776 		}
   1777 
   1778 		if (icr & ICR_RXO) {
   1779 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
   1780 			wantinit = 1;
   1781 		}
   1782 	}
   1783 
   1784 	if (handled) {
   1785 		if (wantinit)
   1786 			wm_init(ifp);
   1787 
   1788 		/* Try to get more packets going. */
   1789 		wm_start(ifp);
   1790 	}
   1791 
   1792 	return (handled);
   1793 }
   1794 
   1795 /*
   1796  * wm_txintr:
   1797  *
   1798  *	Helper; handle transmit interrupts.
   1799  */
   1800 static void
   1801 wm_txintr(struct wm_softc *sc)
   1802 {
   1803 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1804 	struct wm_txsoft *txs;
   1805 	uint8_t status;
   1806 	int i;
   1807 
   1808 	ifp->if_flags &= ~IFF_OACTIVE;
   1809 
   1810 	/*
   1811 	 * Go through the Tx list and free mbufs for those
   1812 	 * frames which have been transmitted.
   1813 	 */
   1814 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
   1815 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
   1816 		txs = &sc->sc_txsoft[i];
   1817 
   1818 		DPRINTF(WM_DEBUG_TX,
   1819 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   1820 
   1821 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
   1822 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1823 
   1824 		status = le32toh(sc->sc_txdescs[
   1825 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
   1826 		if ((status & WTX_ST_DD) == 0) {
   1827 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   1828 			    BUS_DMASYNC_PREREAD);
   1829 			break;
   1830 		}
   1831 
   1832 		DPRINTF(WM_DEBUG_TX,
   1833 		    ("%s: TX: job %d done: descs %d..%d\n",
   1834 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   1835 		    txs->txs_lastdesc));
   1836 
   1837 		/*
   1838 		 * XXX We should probably be using the statistics
   1839 		 * XXX registers, but I don't know if they exist
   1840 		 * XXX on chips before the i82544.
   1841 		 */
   1842 
   1843 #ifdef WM_EVENT_COUNTERS
   1844 		if (status & WTX_ST_TU)
   1845 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   1846 #endif /* WM_EVENT_COUNTERS */
   1847 
   1848 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   1849 			ifp->if_oerrors++;
   1850 			if (status & WTX_ST_LC)
   1851 				printf("%s: late collision\n",
   1852 				    sc->sc_dev.dv_xname);
   1853 			else if (status & WTX_ST_EC) {
   1854 				ifp->if_collisions += 16;
   1855 				printf("%s: excessive collisions\n",
   1856 				    sc->sc_dev.dv_xname);
   1857 			}
   1858 		} else
   1859 			ifp->if_opackets++;
   1860 
   1861 		sc->sc_txfree += txs->txs_ndesc;
   1862 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1863 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1864 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1865 		m_freem(txs->txs_mbuf);
   1866 		txs->txs_mbuf = NULL;
   1867 	}
   1868 
   1869 	/* Update the dirty transmit buffer pointer. */
   1870 	sc->sc_txsdirty = i;
   1871 	DPRINTF(WM_DEBUG_TX,
   1872 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   1873 
   1874 	/*
   1875 	 * If there are no more pending transmissions, cancel the watchdog
   1876 	 * timer.
   1877 	 */
   1878 	if (sc->sc_txsfree == WM_TXQUEUELEN)
   1879 		ifp->if_timer = 0;
   1880 }
   1881 
   1882 /*
   1883  * wm_rxintr:
   1884  *
   1885  *	Helper; handle receive interrupts.
   1886  */
   1887 static void
   1888 wm_rxintr(struct wm_softc *sc)
   1889 {
   1890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1891 	struct wm_rxsoft *rxs;
   1892 	struct mbuf *m;
   1893 	int i, len;
   1894 	uint8_t status, errors;
   1895 
   1896 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
   1897 		rxs = &sc->sc_rxsoft[i];
   1898 
   1899 		DPRINTF(WM_DEBUG_RX,
   1900 		    ("%s: RX: checking descriptor %d\n",
   1901 		    sc->sc_dev.dv_xname, i));
   1902 
   1903 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1904 
   1905 		status = sc->sc_rxdescs[i].wrx_status;
   1906 		errors = sc->sc_rxdescs[i].wrx_errors;
   1907 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
   1908 
   1909 		if ((status & WRX_ST_DD) == 0) {
   1910 			/*
   1911 			 * We have processed all of the receive descriptors.
   1912 			 */
   1913 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
   1914 			break;
   1915 		}
   1916 
   1917 		if (__predict_false(sc->sc_rxdiscard)) {
   1918 			DPRINTF(WM_DEBUG_RX,
   1919 			    ("%s: RX: discarding contents of descriptor %d\n",
   1920 			    sc->sc_dev.dv_xname, i));
   1921 			WM_INIT_RXDESC(sc, i);
   1922 			if (status & WRX_ST_EOP) {
   1923 				/* Reset our state. */
   1924 				DPRINTF(WM_DEBUG_RX,
   1925 				    ("%s: RX: resetting rxdiscard -> 0\n",
   1926 				    sc->sc_dev.dv_xname));
   1927 				sc->sc_rxdiscard = 0;
   1928 			}
   1929 			continue;
   1930 		}
   1931 
   1932 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1933 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1934 
   1935 		m = rxs->rxs_mbuf;
   1936 
   1937 		/*
   1938 		 * Add a new receive buffer to the ring.
   1939 		 */
   1940 		if (wm_add_rxbuf(sc, i) != 0) {
   1941 			/*
   1942 			 * Failed, throw away what we've done so
   1943 			 * far, and discard the rest of the packet.
   1944 			 */
   1945 			ifp->if_ierrors++;
   1946 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1947 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1948 			WM_INIT_RXDESC(sc, i);
   1949 			if ((status & WRX_ST_EOP) == 0)
   1950 				sc->sc_rxdiscard = 1;
   1951 			if (sc->sc_rxhead != NULL)
   1952 				m_freem(sc->sc_rxhead);
   1953 			WM_RXCHAIN_RESET(sc);
   1954 			DPRINTF(WM_DEBUG_RX,
   1955 			    ("%s: RX: Rx buffer allocation failed, "
   1956 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
   1957 			    sc->sc_rxdiscard ? " (discard)" : ""));
   1958 			continue;
   1959 		}
   1960 
   1961 		WM_RXCHAIN_LINK(sc, m);
   1962 
   1963 		m->m_len = len;
   1964 
   1965 		DPRINTF(WM_DEBUG_RX,
   1966 		    ("%s: RX: buffer at %p len %d\n",
   1967 		    sc->sc_dev.dv_xname, m->m_data, len));
   1968 
   1969 		/*
   1970 		 * If this is not the end of the packet, keep
   1971 		 * looking.
   1972 		 */
   1973 		if ((status & WRX_ST_EOP) == 0) {
   1974 			sc->sc_rxlen += len;
   1975 			DPRINTF(WM_DEBUG_RX,
   1976 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   1977 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
   1978 			continue;
   1979 		}
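         		/*
         		 * Example: a frame spanning two clusters arrives as two
         		 * descriptors; the first pass through here stashes that
         		 * buffer's length in sc_rxlen and continues, and the EOP
         		 * pass below adds the final fragment to the total.
         		 */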
   1980 
   1981 		/*
   1982 		 * Okay, we have the entire packet now...
   1983 		 */
   1984 		*sc->sc_rxtailp = NULL;
   1985 		m = sc->sc_rxhead;
   1986 		len += sc->sc_rxlen;
   1987 
   1988 		WM_RXCHAIN_RESET(sc);
   1989 
   1990 		DPRINTF(WM_DEBUG_RX,
   1991 		    ("%s: RX: have entire packet, len -> %d\n",
   1992 		    sc->sc_dev.dv_xname, len));
   1993 
   1994 		/*
   1995 		 * If an error occurred, update stats and drop the packet.
   1996 		 */
   1997 		if (errors &
   1998 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   1999 			ifp->if_ierrors++;
   2000 			if (errors & WRX_ER_SE)
   2001 				printf("%s: symbol error\n",
   2002 				    sc->sc_dev.dv_xname);
   2003 			else if (errors & WRX_ER_SEQ)
   2004 				printf("%s: receive sequence error\n",
   2005 				    sc->sc_dev.dv_xname);
   2006 			else if (errors & WRX_ER_CE)
   2007 				printf("%s: CRC error\n",
   2008 				    sc->sc_dev.dv_xname);
   2009 			m_freem(m);
   2010 			continue;
   2011 		}
   2012 
   2013 		/*
   2014 		 * No errors.  Receive the packet.
   2015 		 *
   2016 		 * Note, we have configured the chip to include the
   2017 		 * CRC with every packet.
   2018 		 */
   2019 		m->m_flags |= M_HASFCS;
   2020 		m->m_pkthdr.rcvif = ifp;
   2021 		m->m_pkthdr.len = len;
   2022 
   2023 #if 0 /* XXXJRT */
   2024 		/*
   2025 		 * If VLANs are enabled, VLAN packets have been unwrapped
   2026 		 * for us.  Associate the tag with the packet.
   2027 		 */
   2028 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   2029 		    (status & WRX_ST_VP) != 0) {
   2030 			struct m_tag *vtag;
   2031 
   2032 			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
   2033 			    M_NOWAIT);
   2034 			if (vtag == NULL) {
   2035 				ifp->if_ierrors++;
   2036 				printf("%s: unable to allocate VLAN tag\n",
   2037 				    sc->sc_dev.dv_xname);
   2038 				m_freem(m);
   2039 				continue;
   2040 			}
   2041 
   2042 			*(u_int *)(vtag + 1) =
   2043 			    le16toh(sc->sc_rxdescs[i].wrx_special);
   2044 		}
   2045 #endif /* XXXJRT */
   2046 
   2047 		/*
   2048 		 * Set up checksum info for this packet.
   2049 		 */
   2050 		if (status & WRX_ST_IPCS) {
   2051 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   2052 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   2053 			if (errors & WRX_ER_IPE)
   2054 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   2055 		}
   2056 		if (status & WRX_ST_TCPCS) {
   2057 			/*
   2058 			 * Note: we don't know if this was TCP or UDP,
   2059 			 * so we just set both bits, and expect the
   2060 			 * upper layers to deal.
   2061 			 */
   2062 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   2063 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
   2064 			if (errors & WRX_ER_TCPE)
   2065 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   2066 		}
   2067 
   2068 		ifp->if_ipackets++;
   2069 
   2070 #if NBPFILTER > 0
   2071 		/* Pass this up to any BPF listeners. */
   2072 		if (ifp->if_bpf)
   2073 			bpf_mtap(ifp->if_bpf, m);
   2074 #endif /* NBPFILTER > 0 */
   2075 
   2076 		/* Pass it on. */
   2077 		(*ifp->if_input)(ifp, m);
   2078 	}
   2079 
   2080 	/* Update the receive pointer. */
   2081 	sc->sc_rxptr = i;
   2082 
   2083 	DPRINTF(WM_DEBUG_RX,
   2084 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
   2085 }
   2086 
   2087 /*
   2088  * wm_linkintr:
   2089  *
   2090  *	Helper; handle link interrupts.
   2091  */
   2092 static void
   2093 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   2094 {
   2095 	uint32_t status;
   2096 
   2097 	/*
   2098 	 * If we get a link status interrupt on a 1000BASE-T
   2099 	 * device, just fall into the normal MII tick path.
   2100 	 */
   2101 	if (sc->sc_flags & WM_F_HAS_MII) {
   2102 		if (icr & ICR_LSC) {
   2103 			DPRINTF(WM_DEBUG_LINK,
   2104 			    ("%s: LINK: LSC -> mii_tick\n",
   2105 			    sc->sc_dev.dv_xname));
   2106 			mii_tick(&sc->sc_mii);
   2107 		} else if (icr & ICR_RXSEQ) {
   2108 			DPRINTF(WM_DEBUG_LINK,
   2109 			    ("%s: LINK Receive sequence error\n",
   2110 			    sc->sc_dev.dv_xname));
   2111 		}
   2112 		return;
   2113 	}
   2114 
   2115 	/*
   2116 	 * If we are now receiving /C/, check for link again in
   2117 	 * a couple of link clock ticks.
   2118 	 */
   2119 	if (icr & ICR_RXCFG) {
   2120 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
   2121 		    sc->sc_dev.dv_xname));
   2122 		sc->sc_tbi_anstate = 2;
   2123 	}
   2124 
   2125 	if (icr & ICR_LSC) {
   2126 		status = CSR_READ(sc, WMREG_STATUS);
   2127 		if (status & STATUS_LU) {
   2128 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   2129 			    sc->sc_dev.dv_xname,
   2130 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   2131 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   2132 			if (status & STATUS_FD)
   2133 				sc->sc_tctl |=
   2134 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2135 			else
   2136 				sc->sc_tctl |=
   2137 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   2138 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2139 			sc->sc_tbi_linkup = 1;
   2140 		} else {
   2141 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   2142 			    sc->sc_dev.dv_xname));
   2143 			sc->sc_tbi_linkup = 0;
   2144 		}
   2145 		sc->sc_tbi_anstate = 2;
   2146 		wm_tbi_set_linkled(sc);
   2147 	} else if (icr & ICR_RXSEQ) {
   2148 		DPRINTF(WM_DEBUG_LINK,
   2149 		    ("%s: LINK: Receive sequence error\n",
   2150 		    sc->sc_dev.dv_xname));
   2151 	}
   2152 }
   2153 
   2154 /*
   2155  * wm_tick:
   2156  *
   2157  *	One second timer, used to check link status, sweep up
   2158  *	completed transmit jobs, etc.
   2159  */
   2160 static void
   2161 wm_tick(void *arg)
   2162 {
   2163 	struct wm_softc *sc = arg;
   2164 	int s;
   2165 
   2166 	s = splnet();
   2167 
   2168 	if (sc->sc_flags & WM_F_HAS_MII)
   2169 		mii_tick(&sc->sc_mii);
   2170 	else
   2171 		wm_tbi_check_link(sc);
   2172 
   2173 	splx(s);
   2174 
   2175 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2176 }
   2177 
   2178 /*
   2179  * wm_reset:
   2180  *
    2181  *	Reset the i8254x chip.
   2182  */
   2183 static void
   2184 wm_reset(struct wm_softc *sc)
   2185 {
   2186 	int i;
   2187 
   2188 	switch (sc->sc_type) {
   2189 	case WM_T_82544:
   2190 	case WM_T_82540:
   2191 	case WM_T_82545:
   2192 	case WM_T_82546:
   2193 	case WM_T_82541:
   2194 	case WM_T_82541_2:
   2195 		/*
   2196 		 * These chips have a problem with the memory-mapped
   2197 		 * write cycle when issuing the reset, so use I/O-mapped
   2198 		 * access, if possible.
   2199 		 */
   2200 		if (sc->sc_flags & WM_F_IOH_VALID)
   2201 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   2202 		else
   2203 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2204 		break;
   2205 
   2206 	case WM_T_82545_3:
   2207 	case WM_T_82546_3:
   2208 		/* Use the shadow control register on these chips. */
   2209 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   2210 		break;
   2211 
   2212 	default:
   2213 		/* Everything else can safely use the documented method. */
   2214 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   2215 		break;
   2216 	}
   2217 	delay(10000);
   2218 
   2219 	for (i = 0; i < 1000; i++) {
   2220 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
   2221 			return;
   2222 		delay(20);
   2223 	}
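         	/*
         	 * Altogether the chip gets 10 ms up front plus up to
         	 * 1000 * 20 us (20 ms) of polling -- roughly 30 ms -- to
         	 * deassert CTRL_RST before the warning below fires.
         	 */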
   2224 
   2225 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
   2226 		printf("%s: WARNING: reset failed to complete\n",
   2227 		    sc->sc_dev.dv_xname);
   2228 }
   2229 
   2230 /*
   2231  * wm_init:		[ifnet interface function]
   2232  *
   2233  *	Initialize the interface.  Must be called at splnet().
   2234  */
   2235 static int
   2236 wm_init(struct ifnet *ifp)
   2237 {
   2238 	struct wm_softc *sc = ifp->if_softc;
   2239 	struct wm_rxsoft *rxs;
   2240 	int i, error = 0;
   2241 	uint32_t reg;
   2242 
   2243 	/*
    2244 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    2245 	 * There is a small but measurable benefit to avoiding the adjustment
   2246 	 * of the descriptor so that the headers are aligned, for normal mtu,
   2247 	 * on such platforms.  One possibility is that the DMA itself is
   2248 	 * slightly more efficient if the front of the entire packet (instead
   2249 	 * of the front of the headers) is aligned.
   2250 	 *
   2251 	 * Note we must always set align_tweak to 0 if we are using
   2252 	 * jumbo frames.
   2253 	 */
   2254 #ifdef __NO_STRICT_ALIGNMENT
   2255 	sc->sc_align_tweak = 0;
   2256 #else
   2257 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   2258 		sc->sc_align_tweak = 0;
   2259 	else
   2260 		sc->sc_align_tweak = 2;
   2261 #endif /* __NO_STRICT_ALIGNMENT */
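         	/*
         	 * Worked example: the 14-byte Ethernet header would leave the
         	 * IP header misaligned in a naturally-aligned buffer, so the
         	 * 2-byte tweak selected above moves the IP header onto a
         	 * 4-byte boundary on strict-alignment machines.
         	 */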
   2262 
   2263 	/* Cancel any pending I/O. */
   2264 	wm_stop(ifp, 0);
   2265 
   2266 	/* Reset the chip to a known state. */
   2267 	wm_reset(sc);
   2268 
   2269 	/* Initialize the transmit descriptor ring. */
   2270 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
   2271 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
   2272 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2273 	sc->sc_txfree = WM_NTXDESC;
   2274 	sc->sc_txnext = 0;
   2275 
   2276 	sc->sc_txctx_ipcs = 0xffffffff;
   2277 	sc->sc_txctx_tucs = 0xffffffff;
   2278 
   2279 	if (sc->sc_type < WM_T_82543) {
   2280 		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
   2281 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
   2282 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
   2283 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   2284 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   2285 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   2286 	} else {
   2287 		CSR_WRITE(sc, WMREG_TBDAH, 0);
   2288 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
   2289 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
   2290 		CSR_WRITE(sc, WMREG_TDH, 0);
   2291 		CSR_WRITE(sc, WMREG_TDT, 0);
   2292 		CSR_WRITE(sc, WMREG_TIDV, 128);
   2293 
   2294 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
   2295 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   2296 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
   2297 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   2298 	}
   2299 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
   2300 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
   2301 
   2302 	/* Initialize the transmit job descriptors. */
   2303 	for (i = 0; i < WM_TXQUEUELEN; i++)
   2304 		sc->sc_txsoft[i].txs_mbuf = NULL;
   2305 	sc->sc_txsfree = WM_TXQUEUELEN;
   2306 	sc->sc_txsnext = 0;
   2307 	sc->sc_txsdirty = 0;
   2308 
   2309 	/*
   2310 	 * Initialize the receive descriptor and receive job
   2311 	 * descriptor rings.
   2312 	 */
   2313 	if (sc->sc_type < WM_T_82543) {
   2314 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
   2315 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
   2316 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
   2317 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   2318 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   2319 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   2320 
   2321 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   2322 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   2323 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   2324 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   2325 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   2326 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   2327 	} else {
   2328 		CSR_WRITE(sc, WMREG_RDBAH, 0);
   2329 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
   2330 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
   2331 		CSR_WRITE(sc, WMREG_RDH, 0);
   2332 		CSR_WRITE(sc, WMREG_RDT, 0);
   2333 		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
   2334 	}
   2335 	for (i = 0; i < WM_NRXDESC; i++) {
   2336 		rxs = &sc->sc_rxsoft[i];
   2337 		if (rxs->rxs_mbuf == NULL) {
   2338 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
   2339 				printf("%s: unable to allocate or map rx "
   2340 				    "buffer %d, error = %d\n",
   2341 				    sc->sc_dev.dv_xname, i, error);
   2342 				/*
   2343 				 * XXX Should attempt to run with fewer receive
   2344 				 * XXX buffers instead of just failing.
   2345 				 */
   2346 				wm_rxdrain(sc);
   2347 				goto out;
   2348 			}
   2349 		} else
   2350 			WM_INIT_RXDESC(sc, i);
   2351 	}
   2352 	sc->sc_rxptr = 0;
   2353 	sc->sc_rxdiscard = 0;
   2354 	WM_RXCHAIN_RESET(sc);
   2355 
   2356 	/*
   2357 	 * Clear out the VLAN table -- we don't use it (yet).
   2358 	 */
   2359 	CSR_WRITE(sc, WMREG_VET, 0);
   2360 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   2361 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   2362 
   2363 	/*
   2364 	 * Set up flow-control parameters.
   2365 	 *
   2366 	 * XXX Values could probably stand some tuning.
   2367 	 */
   2368 	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
   2369 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   2370 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   2371 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   2372 
   2373 		if (sc->sc_type < WM_T_82543) {
   2374 			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   2375 			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
   2376 		} else {
   2377 			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   2378 			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
   2379 		}
   2380 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   2381 	}
   2382 
   2383 #if 0 /* XXXJRT */
   2384 	/* Deal with VLAN enables. */
   2385 	if (sc->sc_ethercom.ec_nvlans != 0)
   2386 		sc->sc_ctrl |= CTRL_VME;
   2387 	else
   2388 #endif /* XXXJRT */
   2389 		sc->sc_ctrl &= ~CTRL_VME;
   2390 
   2391 	/* Write the control registers. */
   2392 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2393 #if 0
   2394 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2395 #endif
   2396 
   2397 	/*
   2398 	 * Set up checksum offload parameters.
   2399 	 */
   2400 	reg = CSR_READ(sc, WMREG_RXCSUM);
   2401 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
   2402 		reg |= RXCSUM_IPOFL;
   2403 	else
   2404 		reg &= ~RXCSUM_IPOFL;
   2405 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
   2406 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   2407 	else {
   2408 		reg &= ~RXCSUM_TUOFL;
   2409 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
   2410 			reg &= ~RXCSUM_IPOFL;
   2411 	}
   2412 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   2413 
   2414 	/*
   2415 	 * Set up the interrupt registers.
   2416 	 */
   2417 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   2418 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   2419 	    ICR_RXO | ICR_RXT0;
   2420 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
   2421 		sc->sc_icr |= ICR_RXCFG;
   2422 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   2423 
   2424 	/* Set up the inter-packet gap. */
   2425 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   2426 
   2427 #if 0 /* XXXJRT */
   2428 	/* Set the VLAN ethernetype. */
   2429 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   2430 #endif
   2431 
   2432 	/*
   2433 	 * Set up the transmit control register; we start out with
    2434 	 * a collision distance suitable for FDX, but update it when
   2435 	 * we resolve the media type.
   2436 	 */
   2437 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
   2438 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2439 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2440 
   2441 	/* Set the media. */
   2442 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
   2443 
   2444 	/*
   2445 	 * Set up the receive control register; we actually program
   2446 	 * the register when we set the receive filter.  Use multicast
   2447 	 * address offset type 0.
   2448 	 *
   2449 	 * Only the i82544 has the ability to strip the incoming
   2450 	 * CRC, so we don't enable that feature.
   2451 	 */
   2452 	sc->sc_mchash_type = 0;
   2453 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
   2454 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
   2455 
    2456 	if (MCLBYTES == 2048) {
    2457 		sc->sc_rctl |= RCTL_2k;
    2458 	} else {
    2459 		/*
    2460 		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many
    2461 		 * XXX DMA segments, dropping" -- why?
    2462 		 */
    2463 #if 0
    2464 		if (sc->sc_type >= WM_T_82543) {
    2465 			switch (MCLBYTES) {
   2466 			case 4096:
   2467 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   2468 				break;
   2469 			case 8192:
   2470 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   2471 				break;
   2472 			case 16384:
   2473 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   2474 				break;
   2475 			default:
   2476 				panic("wm_init: MCLBYTES %d unsupported",
   2477 				    MCLBYTES);
   2478 				break;
   2479 			}
   2480 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   2481 #else
   2482 		panic("wm_init: MCLBYTES > 2048 not supported.");
   2483 #endif
   2484 	}
   2485 
   2486 	/* Set the receive filter. */
   2487 	wm_set_filter(sc);
   2488 
   2489 	/* Start the one second link check clock. */
   2490 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2491 
   2492 	/* ...all done! */
   2493 	ifp->if_flags |= IFF_RUNNING;
   2494 	ifp->if_flags &= ~IFF_OACTIVE;
   2495 
   2496  out:
   2497 	if (error)
   2498 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
   2499 	return (error);
   2500 }
   2501 
   2502 /*
   2503  * wm_rxdrain:
   2504  *
   2505  *	Drain the receive queue.
   2506  */
   2507 static void
   2508 wm_rxdrain(struct wm_softc *sc)
   2509 {
   2510 	struct wm_rxsoft *rxs;
   2511 	int i;
   2512 
   2513 	for (i = 0; i < WM_NRXDESC; i++) {
   2514 		rxs = &sc->sc_rxsoft[i];
   2515 		if (rxs->rxs_mbuf != NULL) {
   2516 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2517 			m_freem(rxs->rxs_mbuf);
   2518 			rxs->rxs_mbuf = NULL;
   2519 		}
   2520 	}
   2521 }
   2522 
   2523 /*
   2524  * wm_stop:		[ifnet interface function]
   2525  *
   2526  *	Stop transmission on the interface.
   2527  */
   2528 static void
   2529 wm_stop(struct ifnet *ifp, int disable)
   2530 {
   2531 	struct wm_softc *sc = ifp->if_softc;
   2532 	struct wm_txsoft *txs;
   2533 	int i;
   2534 
   2535 	/* Stop the one second clock. */
   2536 	callout_stop(&sc->sc_tick_ch);
   2537 
   2538 	if (sc->sc_flags & WM_F_HAS_MII) {
   2539 		/* Down the MII. */
   2540 		mii_down(&sc->sc_mii);
   2541 	}
   2542 
   2543 	/* Stop the transmit and receive processes. */
   2544 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2545 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2546 
   2547 	/* Release any queued transmit buffers. */
   2548 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2549 		txs = &sc->sc_txsoft[i];
   2550 		if (txs->txs_mbuf != NULL) {
   2551 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2552 			m_freem(txs->txs_mbuf);
   2553 			txs->txs_mbuf = NULL;
   2554 		}
   2555 	}
   2556 
   2557 	if (disable)
   2558 		wm_rxdrain(sc);
   2559 
   2560 	/* Mark the interface as down and cancel the watchdog timer. */
   2561 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2562 	ifp->if_timer = 0;
   2563 }
   2564 
   2565 /*
   2566  * wm_acquire_eeprom:
   2567  *
   2568  *	Perform the EEPROM handshake required on some chips.
   2569  */
   2570 static int
   2571 wm_acquire_eeprom(struct wm_softc *sc)
   2572 {
   2573 	uint32_t reg;
   2574 	int x;
   2575 
    2576 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2577 		reg = CSR_READ(sc, WMREG_EECD);
   2578 
   2579 		/* Request EEPROM access. */
   2580 		reg |= EECD_EE_REQ;
   2581 		CSR_WRITE(sc, WMREG_EECD, reg);
   2582 
    2583 		/* ... and wait for it to be granted. */
   2584 		for (x = 0; x < 100; x++) {
   2585 			reg = CSR_READ(sc, WMREG_EECD);
   2586 			if (reg & EECD_EE_GNT)
   2587 				break;
   2588 			delay(5);
   2589 		}
   2590 		if ((reg & EECD_EE_GNT) == 0) {
   2591 			aprint_error("%s: could not acquire EEPROM GNT\n",
   2592 			    sc->sc_dev.dv_xname);
   2593 			reg &= ~EECD_EE_REQ;
   2594 			CSR_WRITE(sc, WMREG_EECD, reg);
   2595 			return (1);
   2596 		}
   2597 	}
   2598 
   2599 	return (0);
   2600 }
   2601 
   2602 /*
   2603  * wm_release_eeprom:
   2604  *
   2605  *	Release the EEPROM mutex.
   2606  */
   2607 static void
   2608 wm_release_eeprom(struct wm_softc *sc)
   2609 {
   2610 	uint32_t reg;
   2611 
   2612 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2613 		reg = CSR_READ(sc, WMREG_EECD);
   2614 		reg &= ~EECD_EE_REQ;
   2615 		CSR_WRITE(sc, WMREG_EECD, reg);
   2616 	}
   2617 }
   2618 
   2619 /*
   2620  * wm_eeprom_sendbits:
   2621  *
   2622  *	Send a series of bits to the EEPROM.
   2623  */
   2624 static void
   2625 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   2626 {
   2627 	uint32_t reg;
   2628 	int x;
   2629 
   2630 	reg = CSR_READ(sc, WMREG_EECD);
   2631 
   2632 	for (x = nbits; x > 0; x--) {
   2633 		if (bits & (1U << (x - 1)))
   2634 			reg |= EECD_DI;
   2635 		else
   2636 			reg &= ~EECD_DI;
   2637 		CSR_WRITE(sc, WMREG_EECD, reg);
   2638 		delay(2);
   2639 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2640 		delay(2);
   2641 		CSR_WRITE(sc, WMREG_EECD, reg);
   2642 		delay(2);
   2643 	}
   2644 }
   2645 
   2646 /*
   2647  * wm_eeprom_recvbits:
   2648  *
   2649  *	Receive a series of bits from the EEPROM.
   2650  */
   2651 static void
   2652 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   2653 {
   2654 	uint32_t reg, val;
   2655 	int x;
   2656 
   2657 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   2658 
   2659 	val = 0;
   2660 	for (x = nbits; x > 0; x--) {
   2661 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2662 		delay(2);
   2663 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   2664 			val |= (1U << (x - 1));
   2665 		CSR_WRITE(sc, WMREG_EECD, reg);
   2666 		delay(2);
   2667 	}
   2668 	*valp = val;
   2669 }
   2670 
   2671 /*
   2672  * wm_read_eeprom_uwire:
   2673  *
   2674  *	Read a word from the EEPROM using the MicroWire protocol.
   2675  */
   2676 static int
   2677 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2678 {
   2679 	uint32_t reg, val;
   2680 	int i;
   2681 
   2682 	for (i = 0; i < wordcnt; i++) {
   2683 		/* Clear SK and DI. */
   2684 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   2685 		CSR_WRITE(sc, WMREG_EECD, reg);
   2686 
   2687 		/* Set CHIP SELECT. */
   2688 		reg |= EECD_CS;
   2689 		CSR_WRITE(sc, WMREG_EECD, reg);
   2690 		delay(2);
   2691 
   2692 		/* Shift in the READ command. */
   2693 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   2694 
   2695 		/* Shift in address. */
   2696 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
   2697 
   2698 		/* Shift out the data. */
   2699 		wm_eeprom_recvbits(sc, &val, 16);
   2700 		data[i] = val & 0xffff;
   2701 
   2702 		/* Clear CHIP SELECT. */
   2703 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   2704 		CSR_WRITE(sc, WMREG_EECD, reg);
   2705 		delay(2);
   2706 	}
   2707 
   2708 	return (0);
   2709 }
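         /*
          * Summarizing the MicroWire transaction above: raise CHIP SELECT,
          * shift out the 3-bit READ opcode and sc_ee_addrbits of address
          * MSB-first, clock 16 data bits back in, and drop CHIP SELECT
          * between words.
          */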
   2710 
   2711 /*
   2712  * wm_spi_eeprom_ready:
   2713  *
   2714  *	Wait for a SPI EEPROM to be ready for commands.
   2715  */
   2716 static int
   2717 wm_spi_eeprom_ready(struct wm_softc *sc)
   2718 {
   2719 	uint32_t val;
   2720 	int usec;
   2721 
   2722 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   2723 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   2724 		wm_eeprom_recvbits(sc, &val, 8);
   2725 		if ((val & SPI_SR_RDY) == 0)
   2726 			break;
   2727 	}
   2728 	if (usec >= SPI_MAX_RETRIES) {
   2729 		aprint_error("%s: EEPROM failed to become ready\n",
   2730 		    sc->sc_dev.dv_xname);
   2731 		return (1);
   2732 	}
   2733 	return (0);
   2734 }
   2735 
   2736 /*
   2737  * wm_read_eeprom_spi:
   2738  *
    2739  *	Read a word from the EEPROM using the SPI protocol.
   2740  */
   2741 static int
   2742 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2743 {
   2744 	uint32_t reg, val;
   2745 	int i;
   2746 	uint8_t opc;
   2747 
   2748 	/* Clear SK and CS. */
   2749 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   2750 	CSR_WRITE(sc, WMREG_EECD, reg);
   2751 	delay(2);
   2752 
   2753 	if (wm_spi_eeprom_ready(sc))
   2754 		return (1);
   2755 
   2756 	/* Toggle CS to flush commands. */
   2757 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   2758 	delay(2);
   2759 	CSR_WRITE(sc, WMREG_EECD, reg);
   2760 	delay(2);
   2761 
   2762 	opc = SPI_OPC_READ;
   2763 	if (sc->sc_ee_addrbits == 8 && word >= 128)
   2764 		opc |= SPI_OPC_A8;
   2765 
   2766 	wm_eeprom_sendbits(sc, opc, 8);
   2767 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
   2768 
   2769 	for (i = 0; i < wordcnt; i++) {
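         		/*
         		 * wm_eeprom_recvbits() deposits the first byte clocked
         		 * in at the top of val; callers expect it as the
         		 * low-order byte of the word, hence the swap below.
         		 */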
   2770 		wm_eeprom_recvbits(sc, &val, 16);
   2771 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   2772 	}
   2773 
   2774 	/* Raise CS and clear SK. */
   2775 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   2776 	CSR_WRITE(sc, WMREG_EECD, reg);
   2777 	delay(2);
   2778 
   2779 	return (0);
   2780 }
   2781 
   2782 /*
   2783  * wm_read_eeprom:
   2784  *
   2785  *	Read data from the serial EEPROM.
   2786  */
   2787 static int
   2788 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2789 {
   2790 	int rv;
   2791 
   2792 	if (wm_acquire_eeprom(sc))
   2793 		return (1);
   2794 
   2795 	if (sc->sc_flags & WM_F_EEPROM_SPI)
   2796 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   2797 	else
   2798 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   2799 
   2800 	wm_release_eeprom(sc);
   2801 	return (rv);
   2802 }
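         /*
          * Usage sketch (hypothetical word offset, for illustration only;
          * the attach code performs the real reads):
          *
          *	uint16_t ea[3];
          *
          *	if (wm_read_eeprom(sc, 0, 3, ea))
          *		printf("EEPROM read failed\n");
          */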
   2803 
   2804 /*
   2805  * wm_add_rxbuf:
   2806  *
    2807  *	Add a receive buffer to the indicated descriptor.
   2808  */
   2809 static int
   2810 wm_add_rxbuf(struct wm_softc *sc, int idx)
   2811 {
   2812 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
   2813 	struct mbuf *m;
   2814 	int error;
   2815 
   2816 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   2817 	if (m == NULL)
   2818 		return (ENOBUFS);
   2819 
   2820 	MCLGET(m, M_DONTWAIT);
   2821 	if ((m->m_flags & M_EXT) == 0) {
   2822 		m_freem(m);
   2823 		return (ENOBUFS);
   2824 	}
   2825 
   2826 	if (rxs->rxs_mbuf != NULL)
   2827 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2828 
   2829 	rxs->rxs_mbuf = m;
   2830 
   2831 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   2832 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   2833 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   2834 	if (error) {
   2835 		printf("%s: unable to load rx DMA map %d, error = %d\n",
   2836 		    sc->sc_dev.dv_xname, idx, error);
   2837 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
   2838 	}
   2839 
   2840 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   2841 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   2842 
   2843 	WM_INIT_RXDESC(sc, idx);
   2844 
   2845 	return (0);
   2846 }
   2847 
   2848 /*
   2849  * wm_set_ral:
   2850  *
    2851  *	Set an entry in the receive address list.
   2852  */
   2853 static void
   2854 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2855 {
   2856 	uint32_t ral_lo, ral_hi;
   2857 
   2858 	if (enaddr != NULL) {
   2859 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2860 		    (enaddr[3] << 24);
   2861 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2862 		ral_hi |= RAL_AV;
   2863 	} else {
   2864 		ral_lo = 0;
   2865 		ral_hi = 0;
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_82544) {
   2869 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2870 		    ral_lo);
   2871 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2872 		    ral_hi);
   2873 	} else {
   2874 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2875 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2876 	}
   2877 }
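         /*
          * Worked example: for the station address 00:a0:c9:12:34:56 the
          * packing above yields ral_lo == 0x12c9a000 and ral_hi ==
          * 0x00005634 | RAL_AV, i.e. the first four bytes little-endian in
          * RAL_LO and the last two in RAL_HI.
          */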
   2878 
   2879 /*
   2880  * wm_mchash:
   2881  *
   2882  *	Compute the hash of the multicast address for the 4096-bit
   2883  *	multicast filter.
   2884  */
   2885 static uint32_t
   2886 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2887 {
   2888 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2889 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2890 	uint32_t hash;
   2891 
   2892 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2893 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2894 
   2895 	return (hash & 0xfff);
   2896 }
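         /*
          * Worked example, hash type 0 (shifts 4 and 4): for an address
          * ending in ...:ab:cd, hash = (0xab >> 4) | (0xcd << 4)
          * = 0x0a | 0xcd0 = 0xcda, which the 0xfff mask leaves intact.
          */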
   2897 
   2898 /*
   2899  * wm_set_filter:
   2900  *
   2901  *	Set up the receive filter.
   2902  */
   2903 static void
   2904 wm_set_filter(struct wm_softc *sc)
   2905 {
   2906 	struct ethercom *ec = &sc->sc_ethercom;
   2907 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2908 	struct ether_multi *enm;
   2909 	struct ether_multistep step;
   2910 	bus_addr_t mta_reg;
   2911 	uint32_t hash, reg, bit;
   2912 	int i;
   2913 
   2914 	if (sc->sc_type >= WM_T_82544)
   2915 		mta_reg = WMREG_CORDOVA_MTA;
   2916 	else
   2917 		mta_reg = WMREG_MTA;
   2918 
   2919 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   2920 
   2921 	if (ifp->if_flags & IFF_BROADCAST)
   2922 		sc->sc_rctl |= RCTL_BAM;
   2923 	if (ifp->if_flags & IFF_PROMISC) {
   2924 		sc->sc_rctl |= RCTL_UPE;
   2925 		goto allmulti;
   2926 	}
   2927 
   2928 	/*
   2929 	 * Set the station address in the first RAL slot, and
   2930 	 * clear the remaining slots.
   2931 	 */
   2932 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
   2933 	for (i = 1; i < WM_RAL_TABSIZE; i++)
   2934 		wm_set_ral(sc, NULL, i);
   2935 
   2936 	/* Clear out the multicast table. */
   2937 	for (i = 0; i < WM_MC_TABSIZE; i++)
   2938 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   2939 
   2940 	ETHER_FIRST_MULTI(step, ec, enm);
   2941 	while (enm != NULL) {
   2942 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2943 			/*
   2944 			 * We must listen to a range of multicast addresses.
   2945 			 * For now, just accept all multicasts, rather than
   2946 			 * trying to set only those filter bits needed to match
   2947 			 * the range.  (At this time, the only use of address
   2948 			 * ranges is for IP multicast routing, for which the
   2949 			 * range is big enough to require all bits set.)
   2950 			 */
   2951 			goto allmulti;
   2952 		}
   2953 
   2954 		hash = wm_mchash(sc, enm->enm_addrlo);
   2955 
   2956 		reg = (hash >> 5) & 0x7f;
   2957 		bit = hash & 0x1f;
   2958 
   2959 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   2960 		hash |= 1U << bit;
   2961 
   2962 		/* XXX Hardware bug?? */
   2963 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   2964 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   2965 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2966 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   2967 		} else
   2968 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   2969 
   2970 		ETHER_NEXT_MULTI(step, enm);
   2971 	}
   2972 
   2973 	ifp->if_flags &= ~IFF_ALLMULTI;
   2974 	goto setit;
   2975 
   2976  allmulti:
   2977 	ifp->if_flags |= IFF_ALLMULTI;
   2978 	sc->sc_rctl |= RCTL_MPE;
   2979 
   2980  setit:
   2981 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   2982 }
   2983 
   2984 /*
   2985  * wm_tbi_mediainit:
   2986  *
   2987  *	Initialize media for use on 1000BASE-X devices.
   2988  */
   2989 static void
   2990 wm_tbi_mediainit(struct wm_softc *sc)
   2991 {
   2992 	const char *sep = "";
   2993 
   2994 	if (sc->sc_type < WM_T_82543)
   2995 		sc->sc_tipg = TIPG_WM_DFLT;
   2996 	else
   2997 		sc->sc_tipg = TIPG_LG_DFLT;
   2998 
   2999 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
   3000 	    wm_tbi_mediastatus);
   3001 
   3002 	/*
   3003 	 * SWD Pins:
   3004 	 *
   3005 	 *	0 = Link LED (output)
   3006 	 *	1 = Loss Of Signal (input)
   3007 	 */
   3008 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   3009 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   3010 
   3011 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3012 
   3013 #define	ADD(ss, mm, dd)							\
   3014 do {									\
   3015 	printf("%s%s", sep, ss);					\
   3016 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   3017 	sep = ", ";							\
   3018 } while (/*CONSTCOND*/0)
   3019 
   3020 	printf("%s: ", sc->sc_dev.dv_xname);
   3021 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   3022 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   3023 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   3024 	printf("\n");
   3025 
   3026 #undef ADD
   3027 
   3028 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
   3029 }
   3030 
   3031 /*
   3032  * wm_tbi_mediastatus:	[ifmedia interface function]
   3033  *
   3034  *	Get the current interface media status on a 1000BASE-X device.
   3035  */
   3036 static void
   3037 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3038 {
   3039 	struct wm_softc *sc = ifp->if_softc;
   3040 
   3041 	ifmr->ifm_status = IFM_AVALID;
   3042 	ifmr->ifm_active = IFM_ETHER;
   3043 
   3044 	if (sc->sc_tbi_linkup == 0) {
   3045 		ifmr->ifm_active |= IFM_NONE;
   3046 		return;
   3047 	}
   3048 
   3049 	ifmr->ifm_status |= IFM_ACTIVE;
   3050 	ifmr->ifm_active |= IFM_1000_SX;
   3051 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   3052 		ifmr->ifm_active |= IFM_FDX;
   3053 }
   3054 
   3055 /*
   3056  * wm_tbi_mediachange:	[ifmedia interface function]
   3057  *
   3058  *	Set hardware to newly-selected media on a 1000BASE-X device.
   3059  */
   3060 static int
   3061 wm_tbi_mediachange(struct ifnet *ifp)
   3062 {
   3063 	struct wm_softc *sc = ifp->if_softc;
   3064 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   3065 	uint32_t status;
   3066 	int i;
   3067 
   3068 	sc->sc_txcw = ife->ifm_data;
   3069 	if (sc->sc_ctrl & CTRL_RFCE)
   3070 		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
   3071 	if (sc->sc_ctrl & CTRL_TFCE)
   3072 		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
   3073 	sc->sc_txcw |= TXCW_ANE;
   3074 
   3075 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   3076 	delay(10000);
   3077 
   3078 	sc->sc_tbi_anstate = 0;
   3079 
   3080 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
   3081 		/* Have signal; wait for the link to come up. */
   3082 		for (i = 0; i < 50; i++) {
   3083 			delay(10000);
   3084 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   3085 				break;
   3086 		}
   3087 
   3088 		status = CSR_READ(sc, WMREG_STATUS);
   3089 		if (status & STATUS_LU) {
   3090 			/* Link is up. */
   3091 			DPRINTF(WM_DEBUG_LINK,
   3092 			    ("%s: LINK: set media -> link up %s\n",
   3093 			    sc->sc_dev.dv_xname,
   3094 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   3095 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3096 			if (status & STATUS_FD)
   3097 				sc->sc_tctl |=
   3098 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3099 			else
   3100 				sc->sc_tctl |=
   3101 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3102 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3103 			sc->sc_tbi_linkup = 1;
   3104 		} else {
   3105 			/* Link is down. */
   3106 			DPRINTF(WM_DEBUG_LINK,
   3107 			    ("%s: LINK: set media -> link down\n",
   3108 			    sc->sc_dev.dv_xname));
   3109 			sc->sc_tbi_linkup = 0;
   3110 		}
   3111 	} else {
   3112 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   3113 		    sc->sc_dev.dv_xname));
   3114 		sc->sc_tbi_linkup = 0;
   3115 	}
   3116 
   3117 	wm_tbi_set_linkled(sc);
   3118 
   3119 	return (0);
   3120 }
   3121 
   3122 /*
   3123  * wm_tbi_set_linkled:
   3124  *
   3125  *	Update the link LED on 1000BASE-X devices.
   3126  */
   3127 static void
   3128 wm_tbi_set_linkled(struct wm_softc *sc)
   3129 {
   3130 
   3131 	if (sc->sc_tbi_linkup)
   3132 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   3133 	else
   3134 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   3135 
   3136 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3137 }
   3138 
   3139 /*
   3140  * wm_tbi_check_link:
   3141  *
   3142  *	Check the link on 1000BASE-X devices.
   3143  */
   3144 static void
   3145 wm_tbi_check_link(struct wm_softc *sc)
   3146 {
   3147 	uint32_t rxcw, ctrl, status;
   3148 
   3149 	if (sc->sc_tbi_anstate == 0)
   3150 		return;
   3151 	else if (sc->sc_tbi_anstate > 1) {
   3152 		DPRINTF(WM_DEBUG_LINK,
   3153 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
   3154 		    sc->sc_tbi_anstate));
   3155 		sc->sc_tbi_anstate--;
   3156 		return;
   3157 	}
   3158 
   3159 	sc->sc_tbi_anstate = 0;
   3160 
   3161 	rxcw = CSR_READ(sc, WMREG_RXCW);
   3162 	ctrl = CSR_READ(sc, WMREG_CTRL);
   3163 	status = CSR_READ(sc, WMREG_STATUS);
   3164 
   3165 	if ((status & STATUS_LU) == 0) {
   3166 		DPRINTF(WM_DEBUG_LINK,
   3167 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
   3168 		sc->sc_tbi_linkup = 0;
   3169 	} else {
   3170 		DPRINTF(WM_DEBUG_LINK,
   3171 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
   3172 		    (status & STATUS_FD) ? "FDX" : "HDX"));
   3173 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3174 		if (status & STATUS_FD)
   3175 			sc->sc_tctl |=
   3176 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3177 		else
   3178 			sc->sc_tctl |=
   3179 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3180 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3181 		sc->sc_tbi_linkup = 1;
   3182 	}
   3183 
   3184 	wm_tbi_set_linkled(sc);
   3185 }
   3186 
   3187 /*
   3188  * wm_gmii_reset:
   3189  *
   3190  *	Reset the PHY.
   3191  */
   3192 static void
   3193 wm_gmii_reset(struct wm_softc *sc)
   3194 {
   3195 	uint32_t reg;
   3196 
   3197 	if (sc->sc_type >= WM_T_82544) {
   3198 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   3199 		delay(20000);
   3200 
   3201 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3202 		delay(20000);
   3203 	} else {
   3204 		/* The PHY reset pin is active-low. */
   3205 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3206 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   3207 		    CTRL_EXT_SWDPIN(4));
   3208 		reg |= CTRL_EXT_SWDPIO(4);
   3209 
   3210 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   3211 		delay(10);
   3212 
   3213 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3214 		delay(10);
   3215 
   3216 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   3217 		delay(10);
   3218 #if 0
   3219 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   3220 #endif
   3221 	}
   3222 }
   3223 
   3224 /*
   3225  * wm_gmii_mediainit:
   3226  *
   3227  *	Initialize media for use on 1000BASE-T devices.
   3228  */
   3229 static void
   3230 wm_gmii_mediainit(struct wm_softc *sc)
   3231 {
   3232 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3233 
   3234 	/* We have MII. */
   3235 	sc->sc_flags |= WM_F_HAS_MII;
   3236 
   3237 	sc->sc_tipg = TIPG_1000T_DFLT;
   3238 
   3239 	/*
   3240 	 * Let the chip set speed/duplex on its own based on
   3241 	 * signals from the PHY.
   3242 	 */
   3243 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
   3244 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3245 
   3246 	/* Initialize our media structures and probe the GMII. */
   3247 	sc->sc_mii.mii_ifp = ifp;
   3248 
   3249 	if (sc->sc_type >= WM_T_82544) {
   3250 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
   3251 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
   3252 	} else {
   3253 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
   3254 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
   3255 	}
   3256 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
   3257 
   3258 	wm_gmii_reset(sc);
   3259 
   3260 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
   3261 	    wm_gmii_mediastatus);
   3262 
   3263 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   3264 	    MII_OFFSET_ANY, 0);
   3265 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
   3266 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   3267 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
   3268 	} else
   3269 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
   3270 }
   3271 
   3272 /*
   3273  * wm_gmii_mediastatus:	[ifmedia interface function]
   3274  *
   3275  *	Get the current interface media status on a 1000BASE-T device.
   3276  */
   3277 static void
   3278 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3279 {
   3280 	struct wm_softc *sc = ifp->if_softc;
   3281 
   3282 	mii_pollstat(&sc->sc_mii);
   3283 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   3284 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   3285 }
   3286 
   3287 /*
   3288  * wm_gmii_mediachange:	[ifmedia interface function]
   3289  *
   3290  *	Set hardware to newly-selected media on a 1000BASE-T device.
   3291  */
   3292 static int
   3293 wm_gmii_mediachange(struct ifnet *ifp)
   3294 {
   3295 	struct wm_softc *sc = ifp->if_softc;
   3296 
   3297 	if (ifp->if_flags & IFF_UP)
   3298 		mii_mediachg(&sc->sc_mii);
   3299 	return (0);
   3300 }
   3301 
   3302 #define	MDI_IO		CTRL_SWDPIN(2)
   3303 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   3304 #define	MDI_CLK		CTRL_SWDPIN(3)
   3305 
   3306 static void
   3307 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   3308 {
   3309 	uint32_t i, v;
   3310 
   3311 	v = CSR_READ(sc, WMREG_CTRL);
   3312 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   3313 	v |= MDI_DIR | CTRL_SWDPIO(3);
   3314 
   3315 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   3316 		if (data & i)
   3317 			v |= MDI_IO;
   3318 		else
   3319 			v &= ~MDI_IO;
   3320 		CSR_WRITE(sc, WMREG_CTRL, v);
   3321 		delay(10);
   3322 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   3323 		delay(10);
   3324 		CSR_WRITE(sc, WMREG_CTRL, v);
   3325 		delay(10);
   3326 	}
   3327 }
   3328 
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

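	/*
	 * IEEE 802.3 clause 22 read frame: a 32-bit preamble of all
	 * ones, then ST(2) | OP(2) | PHYAD(5) | REGAD(5) -- 14 bits,
	 * MSB first -- after which the PHY drives back 16 data bits.
	 */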
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

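	/*
	 * Write frame: preamble, then ST(2) | OP(2) | PHYAD(5) |
	 * REGAD(5) | TA(2) | DATA(16), a full 32 bits, with the
	 * turnaround pattern supplied by MII_COMMAND_ACK.
	 */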
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

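	/*
	 * Poll for completion; 100 iterations at 10us apiece gives
	 * the PHY roughly 1ms to respond.
	 */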
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
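		/*
		 * An all-ones result usually means no PHY answered;
		 * treat it as a failed read.
		 */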
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

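	/* As in the read path, allow the PHY roughly 1ms to respond. */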
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;

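	/*
	 * Update the transmit collision distance for the new duplex
	 * setting; full-duplex links never see collisions and can use
	 * the shorter value.
	 */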
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
}