/* if_wm.c revision 1.61 (NetBSD source viewer export; navigation header removed) */
      1 /*	$NetBSD: if_wm.c,v 1.61 2003/11/04 19:09:39 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     40  *
     41  * TODO (in order of importance):
     42  *
     43  *	- Rework how parameters are loaded from the EEPROM.
     44  *	- Figure out performance stability issue on i82547 (fvdl).
     45  *	- Figure out what to do with the i82545GM and i82546GB
     46  *	  SERDES controllers.
     47  *	- Fix hw VLAN assist.
     48  */
     49 
     50 #include <sys/cdefs.h>
     51 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.61 2003/11/04 19:09:39 thorpej Exp $");
     52 
     53 #include "bpfilter.h"
     54 #include "rnd.h"
     55 
     56 #include <sys/param.h>
     57 #include <sys/systm.h>
     58 #include <sys/callout.h>
     59 #include <sys/mbuf.h>
     60 #include <sys/malloc.h>
     61 #include <sys/kernel.h>
     62 #include <sys/socket.h>
     63 #include <sys/ioctl.h>
     64 #include <sys/errno.h>
     65 #include <sys/device.h>
     66 #include <sys/queue.h>
     67 
     68 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
     69 
     70 #if NRND > 0
     71 #include <sys/rnd.h>
     72 #endif
     73 
     74 #include <net/if.h>
     75 #include <net/if_dl.h>
     76 #include <net/if_media.h>
     77 #include <net/if_ether.h>
     78 
     79 #if NBPFILTER > 0
     80 #include <net/bpf.h>
     81 #endif
     82 
     83 #include <netinet/in.h>			/* XXX for struct ip */
     84 #include <netinet/in_systm.h>		/* XXX for struct ip */
     85 #include <netinet/ip.h>			/* XXX for struct ip */
     86 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
     87 
     88 #include <machine/bus.h>
     89 #include <machine/intr.h>
     90 #include <machine/endian.h>
     91 
     92 #include <dev/mii/mii.h>
     93 #include <dev/mii/miivar.h>
     94 #include <dev/mii/mii_bitbang.h>
     95 
     96 #include <dev/pci/pcireg.h>
     97 #include <dev/pci/pcivar.h>
     98 #include <dev/pci/pcidevs.h>
     99 
    100 #include <dev/pci/if_wmreg.h>
    101 
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/* Debug printf: emit the printf-args tuple y only when bit x is set in wm_debug. */
#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
    113 
    114 /*
    115  * Transmit descriptor list size.  Due to errata, we can only have
    116  * 256 hardware descriptors in the ring.  We tell the upper layers
    117  * that they can queue a lot of packets, and we go ahead and manage
    118  * up to 64 of them at a time.  We allow up to 16 DMA segments per
    119  * packet.
    120  */
    121 #define	WM_NTXSEGS		16
    122 #define	WM_IFQUEUELEN		256
    123 #define	WM_TXQUEUELEN		64
    124 #define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
    125 #define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
    126 #define	WM_NTXDESC		256
    127 #define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
    128 #define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
    129 #define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
    130 
    131 /*
    132  * Receive descriptor list size.  We have one Rx buffer for normal
    133  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    134  * packet.  We allocate 256 receive descriptors, each with a 2k
    135  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    136  */
    137 #define	WM_NRXDESC		256
    138 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    139 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    140 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    141 
    142 /*
    143  * Control structures are DMA'd to the i82542 chip.  We allocate them in
    144  * a single clump that maps to a single DMA segment to make serveral things
    145  * easier.
    146  */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

/*
 * Byte offsets of individual descriptors within the control-data clump,
 * used both for DMA-map syncs and for computing device-visible addresses.
 */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    162 
    163 /*
    164  * Software state for transmit jobs.
    165  */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet (index into Tx ring) */
	int txs_lastdesc;		/* last descriptor in packet (index into Tx ring) */
	int txs_ndesc;			/* # of descriptors used by this packet */
};
    173 
    174 /*
    175  * Software state for receive buffers.  Each descriptor gets a
    176  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    177  * more than one buffer, we chain them together.
    178  */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map (one per Rx descriptor) */
};
    183 
/*
 * Chip variants supported by this driver.  The ordering matters: the
 * driver compares chip types with relational operators (e.g.
 * "sc_type < WM_T_82543") to gate feature differences, so entries must
 * stay in generation order.
 */
typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;
    200 
    201 /*
    202  * Software state per device.
    203  */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag (memory-mapped regs) */
	bus_space_handle_t sc_sh;	/* bus space handle (memory-mapped regs) */
	bus_space_tag_t sc_iot;		/* I/O space tag (indirect access) */
	bus_space_handle_t sc_ioh;	/* I/O space handle (indirect access) */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see WM_F_* below */
	int sc_bus_speed;		/* PCI/PCIX bus speed (MHz) */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Rx buffer offset (0 or 2) used by WM_INIT_RXDESC to align the
	 * post-Ethernet-header payload; see the BRAINDAMAGE comment there.
	 */
	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;		/* nonzero: discarding current Rx packet
					   — NOTE(review): inferred from name;
					   confirm against wm_rxintr() */
	int	sc_rxlen;		/* bytes accumulated so far in the Rx
					   mbuf chain (reset by WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* last mbuf linked into the chain */
	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
    310 
/*
 * Reset the in-progress Rx mbuf chain: point the tail pointer back at
 * the head, terminate the (now empty) chain, and zero the byte count.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf m to the in-progress Rx chain and advance the tail
 * pointer to m's m_next so the next buffer links after it.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
    323 
    324 /* sc_flags */
    325 #define	WM_F_HAS_MII		0x01	/* has MII */
    326 #define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
    327 #define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
    328 #define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
    329 #define	WM_F_BUS64		0x20	/* bus is 64-bit */
    330 #define	WM_F_PCIX		0x40	/* bus is PCI-X */
    331 
    332 #ifdef WM_EVENT_COUNTERS
    333 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    334 #else
    335 #define	WM_EVCNT_INCR(ev)	/* nothing */
    336 #endif
    337 
    338 #define	CSR_READ(sc, reg)						\
    339 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    340 #define	CSR_WRITE(sc, reg, val)						\
    341 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    342 
    343 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
    344 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    345 
/*
 * Sync n Tx descriptors starting at ring index x.  Because the ring is
 * circular, a range that wraps past WM_NTXDESC is synced in two pieces:
 * first from x to the end of the ring, then from index 0 for the rest.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
    366 
/* Sync the single Rx descriptor at ring index x. */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    372 
/*
 * (Re)initialize Rx descriptor x: reset its mbuf's data pointer,
 * refill the descriptor fields, sync it to the chip, and advance the
 * hardware Rx tail (RDT) to hand the descriptor back to the chip.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    408 
    409 static void	wm_start(struct ifnet *);
    410 static void	wm_watchdog(struct ifnet *);
    411 static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
    412 static int	wm_init(struct ifnet *);
    413 static void	wm_stop(struct ifnet *, int);
    414 
    415 static void	wm_shutdown(void *);
    416 
    417 static void	wm_reset(struct wm_softc *);
    418 static void	wm_rxdrain(struct wm_softc *);
    419 static int	wm_add_rxbuf(struct wm_softc *, int);
    420 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
    421 static void	wm_tick(void *);
    422 
    423 static void	wm_set_filter(struct wm_softc *);
    424 
    425 static int	wm_intr(void *);
    426 static void	wm_txintr(struct wm_softc *);
    427 static void	wm_rxintr(struct wm_softc *);
    428 static void	wm_linkintr(struct wm_softc *, uint32_t);
    429 
    430 static void	wm_tbi_mediainit(struct wm_softc *);
    431 static int	wm_tbi_mediachange(struct ifnet *);
    432 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    433 
    434 static void	wm_tbi_set_linkled(struct wm_softc *);
    435 static void	wm_tbi_check_link(struct wm_softc *);
    436 
    437 static void	wm_gmii_reset(struct wm_softc *);
    438 
    439 static int	wm_gmii_i82543_readreg(struct device *, int, int);
    440 static void	wm_gmii_i82543_writereg(struct device *, int, int, int);
    441 
    442 static int	wm_gmii_i82544_readreg(struct device *, int, int);
    443 static void	wm_gmii_i82544_writereg(struct device *, int, int, int);
    444 
    445 static void	wm_gmii_statchg(struct device *);
    446 
    447 static void	wm_gmii_mediainit(struct wm_softc *);
    448 static int	wm_gmii_mediachange(struct ifnet *);
    449 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    450 
    451 static int	wm_match(struct device *, struct cfdata *, void *);
    452 static void	wm_attach(struct device *, struct device *, void *);
    453 
    454 CFATTACH_DECL(wm, sizeof(struct wm_softc),
    455     wm_match, wm_attach, NULL, NULL);
    456 
    457 /*
    458  * Devices supported by this driver.
    459  */
/*
 * Table of supported PCI vendor/product IDs, scanned linearly by
 * wm_lookup().  wmp_flags describes the media type (1000BASE-X fiber
 * vs. 1000BASE-T copper).  The table is terminated by an entry whose
 * wmp_name is NULL.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01	/* 1000BASE-X (fiber/TBI) */
#define	WMP_F_1000T		0x02	/* 1000BASE-T (copper/GMII) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	/* Terminator entry; wm_lookup() stops at the NULL name. */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
    588 
    589 #ifdef WM_EVENT_COUNTERS
    590 #if WM_NTXSEGS != 16
    591 #error Update wm_txseg_evcnt_names
    592 #endif
/* Event-counter names for Tx packets using 1..WM_NTXSEGS DMA segments. */
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
    611 #endif /* WM_EVENT_COUNTERS */
    612 
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a chip register indirectly through the I/O BAR: latch the
 *	register offset at I/O offset 0 (IOADDR), then read the value
 *	back from I/O offset 4 (IODATA).
 */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
    622 
    623 static __inline void
    624 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
    625 {
    626 
    627 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
    628 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
    629 }
    630 
    631 static const struct wm_product *
    632 wm_lookup(const struct pci_attach_args *pa)
    633 {
    634 	const struct wm_product *wmp;
    635 
    636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    639 			return (wmp);
    640 	}
    641 	return (NULL);
    642 }
    643 
    644 static int
    645 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    646 {
    647 	struct pci_attach_args *pa = aux;
    648 
    649 	if (wm_lookup(pa) != NULL)
    650 		return (1);
    651 
    652 	return (0);
    653 }
    654 
    655 static void
    656 wm_attach(struct device *parent, struct device *self, void *aux)
    657 {
    658 	struct wm_softc *sc = (void *) self;
    659 	struct pci_attach_args *pa = aux;
    660 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    661 	pci_chipset_tag_t pc = pa->pa_pc;
    662 	pci_intr_handle_t ih;
    663 	const char *intrstr = NULL;
    664 	const char *eetype;
    665 	bus_space_tag_t memt;
    666 	bus_space_handle_t memh;
    667 	bus_dma_segment_t seg;
    668 	int memh_valid;
    669 	int i, rseg, error;
    670 	const struct wm_product *wmp;
    671 	uint8_t enaddr[ETHER_ADDR_LEN];
    672 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    673 	pcireg_t preg, memtype;
    674 	uint32_t reg;
    675 	int pmreg;
    676 
    677 	callout_init(&sc->sc_tick_ch);
    678 
    679 	wmp = wm_lookup(pa);
    680 	if (wmp == NULL) {
    681 		printf("\n");
    682 		panic("wm_attach: impossible");
    683 	}
    684 
    685 	sc->sc_dmat = pa->pa_dmat;
    686 
    687 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    688 	aprint_naive(": Ethernet controller\n");
    689 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
    690 
    691 	sc->sc_type = wmp->wmp_type;
    692 	if (sc->sc_type < WM_T_82543) {
    693 		if (preg < 2) {
    694 			aprint_error("%s: i82542 must be at least rev. 2\n",
    695 			    sc->sc_dev.dv_xname);
    696 			return;
    697 		}
    698 		if (preg < 3)
    699 			sc->sc_type = WM_T_82542_2_0;
    700 	}
    701 
    702 	/*
    703 	 * Map the device.  All devices support memory-mapped acccess,
    704 	 * and it is really required for normal operation.
    705 	 */
    706 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    707 	switch (memtype) {
    708 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    709 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    710 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    711 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    712 		break;
    713 	default:
    714 		memh_valid = 0;
    715 	}
    716 
    717 	if (memh_valid) {
    718 		sc->sc_st = memt;
    719 		sc->sc_sh = memh;
    720 	} else {
    721 		aprint_error("%s: unable to map device registers\n",
    722 		    sc->sc_dev.dv_xname);
    723 		return;
    724 	}
    725 
    726 	/*
    727 	 * In addition, i82544 and later support I/O mapped indirect
    728 	 * register access.  It is not desirable (nor supported in
    729 	 * this driver) to use it for normal operation, though it is
    730 	 * required to work around bugs in some chip versions.
    731 	 */
    732 	if (sc->sc_type >= WM_T_82544) {
    733 		/* First we have to find the I/O BAR. */
    734 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
    735 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
    736 			    PCI_MAPREG_TYPE_IO)
    737 				break;
    738 		}
    739 		if (i == PCI_MAPREG_END)
    740 			aprint_error("%s: WARNING: unable to find I/O BAR\n",
    741 			    sc->sc_dev.dv_xname);
    742 		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
    743 					0, &sc->sc_iot, &sc->sc_ioh,
    744 					NULL, NULL) == 0)
    745 			sc->sc_flags |= WM_F_IOH_VALID;
    746 		else
    747 			aprint_error("%s: WARNING: unable to map I/O space\n",
    748 			    sc->sc_dev.dv_xname);
    749 	}
    750 
    751 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
    752 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    753 	preg |= PCI_COMMAND_MASTER_ENABLE;
    754 	if (sc->sc_type < WM_T_82542_2_1)
    755 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
    756 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
    757 
    758 	/* Get it out of power save mode, if needed. */
    759 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
    760 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
    761 		    PCI_PMCSR_STATE_MASK;
    762 		if (preg == PCI_PMCSR_STATE_D3) {
    763 			/*
    764 			 * The card has lost all configuration data in
    765 			 * this state, so punt.
    766 			 */
    767 			aprint_error("%s: unable to wake from power state D3\n",
    768 			    sc->sc_dev.dv_xname);
    769 			return;
    770 		}
    771 		if (preg != PCI_PMCSR_STATE_D0) {
    772 			aprint_normal("%s: waking up from power state D%d\n",
    773 			    sc->sc_dev.dv_xname, preg);
    774 			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
    775 			    PCI_PMCSR_STATE_D0);
    776 		}
    777 	}
    778 
    779 	/*
    780 	 * Map and establish our interrupt.
    781 	 */
    782 	if (pci_intr_map(pa, &ih)) {
    783 		aprint_error("%s: unable to map interrupt\n",
    784 		    sc->sc_dev.dv_xname);
    785 		return;
    786 	}
    787 	intrstr = pci_intr_string(pc, ih);
    788 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
    789 	if (sc->sc_ih == NULL) {
    790 		aprint_error("%s: unable to establish interrupt",
    791 		    sc->sc_dev.dv_xname);
    792 		if (intrstr != NULL)
    793 			aprint_normal(" at %s", intrstr);
    794 		aprint_normal("\n");
    795 		return;
    796 	}
    797 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
    798 
    799 	/*
    800 	 * Determine a few things about the bus we're connected to.
    801 	 */
    802 	if (sc->sc_type < WM_T_82543) {
    803 		/* We don't really know the bus characteristics here. */
    804 		sc->sc_bus_speed = 33;
    805 	} else  {
    806 		reg = CSR_READ(sc, WMREG_STATUS);
    807 		if (reg & STATUS_BUS64)
    808 			sc->sc_flags |= WM_F_BUS64;
    809 		if (sc->sc_type >= WM_T_82544 &&
    810 		    (reg & STATUS_PCIX_MODE) != 0) {
    811 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
    812 
    813 			sc->sc_flags |= WM_F_PCIX;
    814 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
    815 					       PCI_CAP_PCIX,
    816 					       &sc->sc_pcix_offset, NULL) == 0)
    817 				aprint_error("%s: unable to find PCIX "
    818 				    "capability\n", sc->sc_dev.dv_xname);
    819 			else if (sc->sc_type != WM_T_82545_3 &&
    820 				 sc->sc_type != WM_T_82546_3) {
    821 				/*
    822 				 * Work around a problem caused by the BIOS
    823 				 * setting the max memory read byte count
    824 				 * incorrectly.
    825 				 */
    826 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
    827 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
    828 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
    829 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
    830 
    831 				bytecnt =
    832 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
    833 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
    834 				maxb =
    835 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
    836 				    PCI_PCIX_STATUS_MAXB_SHIFT;
    837 				if (bytecnt > maxb) {
    838 					aprint_verbose("%s: resetting PCI-X "
    839 					    "MMRBC: %d -> %d\n",
    840 					    sc->sc_dev.dv_xname,
    841 					    512 << bytecnt, 512 << maxb);
    842 					pcix_cmd = (pcix_cmd &
    843 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
    844 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
    845 					pci_conf_write(pa->pa_pc, pa->pa_tag,
    846 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
    847 					    pcix_cmd);
    848 				}
    849 			}
    850 		}
    851 		/*
    852 		 * The quad port adapter is special; it has a PCIX-PCIX
    853 		 * bridge on the board, and can run the secondary bus at
    854 		 * a higher speed.
    855 		 */
    856 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
    857 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
    858 								      : 66;
    859 		} else if (sc->sc_flags & WM_F_PCIX) {
    860 			switch (STATUS_PCIXSPD(reg)) {
    861 			case STATUS_PCIXSPD_50_66:
    862 				sc->sc_bus_speed = 66;
    863 				break;
    864 			case STATUS_PCIXSPD_66_100:
    865 				sc->sc_bus_speed = 100;
    866 				break;
    867 			case STATUS_PCIXSPD_100_133:
    868 				sc->sc_bus_speed = 133;
    869 				break;
    870 			default:
    871 				aprint_error(
    872 				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
    873 				    sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
    874 				sc->sc_bus_speed = 66;
    875 			}
    876 		} else
    877 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
    878 		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
    879 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
    880 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
    881 	}
    882 
    883 	/*
    884 	 * Allocate the control data structures, and create and load the
    885 	 * DMA map for it.
    886 	 */
    887 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    888 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
    889 	    0)) != 0) {
    890 		aprint_error(
    891 		    "%s: unable to allocate control data, error = %d\n",
    892 		    sc->sc_dev.dv_xname, error);
    893 		goto fail_0;
    894 	}
    895 
    896 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    897 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
    898 	    0)) != 0) {
    899 		aprint_error("%s: unable to map control data, error = %d\n",
    900 		    sc->sc_dev.dv_xname, error);
    901 		goto fail_1;
    902 	}
    903 
    904 	if ((error = bus_dmamap_create(sc->sc_dmat,
    905 	    sizeof(struct wm_control_data), 1,
    906 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
    907 		aprint_error("%s: unable to create control data DMA map, "
    908 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    909 		goto fail_2;
    910 	}
    911 
    912 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    913 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
    914 	    0)) != 0) {
    915 		aprint_error(
    916 		    "%s: unable to load control data DMA map, error = %d\n",
    917 		    sc->sc_dev.dv_xname, error);
    918 		goto fail_3;
    919 	}
    920 
    921 	/*
    922 	 * Create the transmit buffer DMA maps.
    923 	 */
    924 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    925 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
    926 		    WM_NTXSEGS, MCLBYTES, 0, 0,
    927 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    928 			aprint_error("%s: unable to create Tx DMA map %d, "
    929 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    930 			goto fail_4;
    931 		}
    932 	}
    933 
    934 	/*
    935 	 * Create the receive buffer DMA maps.
    936 	 */
    937 	for (i = 0; i < WM_NRXDESC; i++) {
    938 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    939 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    940 			aprint_error("%s: unable to create Rx DMA map %d, "
    941 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    942 			goto fail_5;
    943 		}
    944 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    945 	}
    946 
    947 	/*
    948 	 * Reset the chip to a known state.
    949 	 */
    950 	wm_reset(sc);
    951 
    952 	/*
    953 	 * Get some information about the EEPROM.
    954 	 */
    955 	if (sc->sc_type >= WM_T_82540)
    956 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
    957 	if (sc->sc_type <= WM_T_82544)
    958 		sc->sc_ee_addrbits = 6;
    959 	else if (sc->sc_type <= WM_T_82546_3) {
    960 		reg = CSR_READ(sc, WMREG_EECD);
    961 		if (reg & EECD_EE_SIZE)
    962 			sc->sc_ee_addrbits = 8;
    963 		else
    964 			sc->sc_ee_addrbits = 6;
    965 	} else if (sc->sc_type <= WM_T_82547_2) {
    966 		reg = CSR_READ(sc, WMREG_EECD);
    967 		if (reg & EECD_EE_TYPE) {
    968 			sc->sc_flags |= WM_F_EEPROM_SPI;
    969 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
    970 		} else
    971 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
    972 	} else {
    973 		/* Assume everything else is SPI. */
    974 		reg = CSR_READ(sc, WMREG_EECD);
    975 		sc->sc_flags |= WM_F_EEPROM_SPI;
    976 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
    977 	}
    978 	if (sc->sc_flags & WM_F_EEPROM_SPI)
    979 		eetype = "SPI";
    980 	else
    981 		eetype = "MicroWire";
    982 	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
    983 	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
    984 	    sc->sc_ee_addrbits, eetype);
    985 
    986 	/*
    987 	 * Read the Ethernet address from the EEPROM.
    988 	 */
    989 	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
    990 	    sizeof(myea) / sizeof(myea[0]), myea)) {
    991 		aprint_error("%s: unable to read Ethernet address\n",
    992 		    sc->sc_dev.dv_xname);
    993 		return;
    994 	}
    995 	enaddr[0] = myea[0] & 0xff;
    996 	enaddr[1] = myea[0] >> 8;
    997 	enaddr[2] = myea[1] & 0xff;
    998 	enaddr[3] = myea[1] >> 8;
    999 	enaddr[4] = myea[2] & 0xff;
   1000 	enaddr[5] = myea[2] >> 8;
   1001 
   1002 	/*
   1003 	 * Toggle the LSB of the MAC address on the second port
   1004 	 * of the i82546.
   1005 	 */
   1006 	if (sc->sc_type == WM_T_82546) {
   1007 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
   1008 			enaddr[5] ^= 1;
   1009 	}
   1010 
   1011 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
   1012 	    ether_sprintf(enaddr));
   1013 
   1014 	/*
   1015 	 * Read the config info from the EEPROM, and set up various
   1016 	 * bits in the control registers based on their contents.
   1017 	 */
   1018 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
   1019 		aprint_error("%s: unable to read CFG1 from EEPROM\n",
   1020 		    sc->sc_dev.dv_xname);
   1021 		return;
   1022 	}
   1023 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
   1024 		aprint_error("%s: unable to read CFG2 from EEPROM\n",
   1025 		    sc->sc_dev.dv_xname);
   1026 		return;
   1027 	}
   1028 	if (sc->sc_type >= WM_T_82544) {
   1029 		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
   1030 			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
   1031 			    sc->sc_dev.dv_xname);
   1032 			return;
   1033 		}
   1034 	}
   1035 
   1036 	if (cfg1 & EEPROM_CFG1_ILOS)
   1037 		sc->sc_ctrl |= CTRL_ILOS;
   1038 	if (sc->sc_type >= WM_T_82544) {
   1039 		sc->sc_ctrl |=
   1040 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   1041 		    CTRL_SWDPIO_SHIFT;
   1042 		sc->sc_ctrl |=
   1043 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   1044 		    CTRL_SWDPINS_SHIFT;
   1045 	} else {
   1046 		sc->sc_ctrl |=
   1047 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   1048 		    CTRL_SWDPIO_SHIFT;
   1049 	}
   1050 
   1051 #if 0
   1052 	if (sc->sc_type >= WM_T_82544) {
   1053 		if (cfg1 & EEPROM_CFG1_IPS0)
   1054 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   1055 		if (cfg1 & EEPROM_CFG1_IPS1)
   1056 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   1057 		sc->sc_ctrl_ext |=
   1058 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   1059 		    CTRL_EXT_SWDPIO_SHIFT;
   1060 		sc->sc_ctrl_ext |=
   1061 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   1062 		    CTRL_EXT_SWDPINS_SHIFT;
   1063 	} else {
   1064 		sc->sc_ctrl_ext |=
   1065 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   1066 		    CTRL_EXT_SWDPIO_SHIFT;
   1067 	}
   1068 #endif
   1069 
   1070 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   1071 #if 0
   1072 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   1073 #endif
   1074 
   1075 	/*
   1076 	 * Set up some register offsets that are different between
   1077 	 * the i82542 and the i82543 and later chips.
   1078 	 */
   1079 	if (sc->sc_type < WM_T_82543) {
   1080 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
   1081 		sc->sc_tdt_reg = WMREG_OLD_TDT;
   1082 	} else {
   1083 		sc->sc_rdt_reg = WMREG_RDT;
   1084 		sc->sc_tdt_reg = WMREG_TDT;
   1085 	}
   1086 
   1087 	/*
   1088 	 * Determine if we should use flow control.  We should
   1089 	 * always use it, unless we're on a i82542 < 2.1.
   1090 	 */
   1091 	if (sc->sc_type >= WM_T_82542_2_1)
   1092 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
   1093 
   1094 	/*
   1095 	 * Determine if we're TBI or GMII mode, and initialize the
   1096 	 * media structures accordingly.
   1097 	 */
   1098 	if (sc->sc_type < WM_T_82543 ||
   1099 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   1100 		if (wmp->wmp_flags & WMP_F_1000T)
   1101 			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
   1102 			    "product!\n", sc->sc_dev.dv_xname);
   1103 		wm_tbi_mediainit(sc);
   1104 	} else {
   1105 		if (wmp->wmp_flags & WMP_F_1000X)
   1106 			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
   1107 			    "product!\n", sc->sc_dev.dv_xname);
   1108 		wm_gmii_mediainit(sc);
   1109 	}
   1110 
   1111 	ifp = &sc->sc_ethercom.ec_if;
   1112 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
   1113 	ifp->if_softc = sc;
   1114 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1115 	ifp->if_ioctl = wm_ioctl;
   1116 	ifp->if_start = wm_start;
   1117 	ifp->if_watchdog = wm_watchdog;
   1118 	ifp->if_init = wm_init;
   1119 	ifp->if_stop = wm_stop;
   1120 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   1121 	IFQ_SET_READY(&ifp->if_snd);
   1122 
   1123 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   1124 
   1125 	/*
   1126 	 * If we're a i82543 or greater, we can support VLANs.
   1127 	 */
   1128 	if (sc->sc_type >= WM_T_82543)
   1129 		sc->sc_ethercom.ec_capabilities |=
   1130 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
   1131 
   1132 	/*
   1133 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   1134 	 * on i82543 and later.
   1135 	 */
   1136 	if (sc->sc_type >= WM_T_82543)
   1137 		ifp->if_capabilities |=
   1138 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
   1139 
   1140 	/*
   1141 	 * Attach the interface.
   1142 	 */
   1143 	if_attach(ifp);
   1144 	ether_ifattach(ifp, enaddr);
   1145 #if NRND > 0
   1146 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
   1147 	    RND_TYPE_NET, 0);
   1148 #endif
   1149 
   1150 #ifdef WM_EVENT_COUNTERS
   1151 	/* Attach event counters. */
   1152 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   1153 	    NULL, sc->sc_dev.dv_xname, "txsstall");
   1154 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   1155 	    NULL, sc->sc_dev.dv_xname, "txdstall");
   1156 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
   1157 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
   1158 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   1159 	    NULL, sc->sc_dev.dv_xname, "txdw");
   1160 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   1161 	    NULL, sc->sc_dev.dv_xname, "txqe");
   1162 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   1163 	    NULL, sc->sc_dev.dv_xname, "rxintr");
   1164 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   1165 	    NULL, sc->sc_dev.dv_xname, "linkintr");
   1166 
   1167 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   1168 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
   1169 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   1170 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
   1171 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   1172 	    NULL, sc->sc_dev.dv_xname, "txipsum");
   1173 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   1174 	    NULL, sc->sc_dev.dv_xname, "txtusum");
   1175 
   1176 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
   1177 	    NULL, sc->sc_dev.dv_xname, "txctx init");
   1178 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
   1179 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
   1180 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
   1181 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
   1182 
   1183 	for (i = 0; i < WM_NTXSEGS; i++)
   1184 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   1185 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
   1186 
   1187 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   1188 	    NULL, sc->sc_dev.dv_xname, "txdrop");
   1189 
   1190 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   1191 	    NULL, sc->sc_dev.dv_xname, "tu");
   1192 #endif /* WM_EVENT_COUNTERS */
   1193 
   1194 	/*
   1195 	 * Make sure the interface is shutdown during reboot.
   1196 	 */
   1197 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
   1198 	if (sc->sc_sdhook == NULL)
   1199 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
   1200 		    sc->sc_dev.dv_xname);
   1201 	return;
   1202 
   1203 	/*
   1204 	 * Free any resources we've allocated during the failed attach
   1205 	 * attempt.  Do this in reverse order and fall through.
   1206 	 */
   1207  fail_5:
   1208 	for (i = 0; i < WM_NRXDESC; i++) {
   1209 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
   1210 			bus_dmamap_destroy(sc->sc_dmat,
   1211 			    sc->sc_rxsoft[i].rxs_dmamap);
   1212 	}
   1213  fail_4:
   1214 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   1215 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
   1216 			bus_dmamap_destroy(sc->sc_dmat,
   1217 			    sc->sc_txsoft[i].txs_dmamap);
   1218 	}
   1219 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
   1220  fail_3:
   1221 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
   1222  fail_2:
   1223 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
   1224 	    sizeof(struct wm_control_data));
   1225  fail_1:
   1226 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
   1227  fail_0:
   1228 	return;
   1229 }
   1230 
   1231 /*
   1232  * wm_shutdown:
   1233  *
   1234  *	Make sure the interface is stopped at reboot time.
   1235  */
   1236 static void
   1237 wm_shutdown(void *arg)
   1238 {
   1239 	struct wm_softc *sc = arg;
   1240 
   1241 	wm_stop(&sc->sc_ethercom.ec_if, 1);
   1242 }
   1243 
   1244 /*
   1245  * wm_tx_cksum:
   1246  *
   1247  *	Set up TCP/IP checksumming parameters for the
   1248  *	specified packet.
   1249  */
   1250 static int
   1251 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   1252     uint32_t *fieldsp)
   1253 {
   1254 	struct mbuf *m0 = txs->txs_mbuf;
   1255 	struct livengood_tcpip_ctxdesc *t;
   1256 	uint32_t fields = 0, ipcs, tucs;
   1257 	struct ip *ip;
   1258 	struct ether_header *eh;
   1259 	int offset, iphl;
   1260 
   1261 	/*
   1262 	 * XXX It would be nice if the mbuf pkthdr had offset
   1263 	 * fields for the protocol headers.
   1264 	 */
   1265 
   1266 	eh = mtod(m0, struct ether_header *);
   1267 	switch (htons(eh->ether_type)) {
   1268 	case ETHERTYPE_IP:
   1269 		iphl = sizeof(struct ip);
   1270 		offset = ETHER_HDR_LEN;
   1271 		break;
   1272 
   1273 	case ETHERTYPE_VLAN:
   1274 		iphl = sizeof(struct ip);
   1275 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   1276 		break;
   1277 
   1278 	default:
   1279 		/*
   1280 		 * Don't support this protocol or encapsulation.
   1281 		 */
   1282 		*fieldsp = 0;
   1283 		*cmdp = 0;
   1284 		return (0);
   1285 	}
   1286 
   1287 	if (m0->m_len < (offset + iphl)) {
   1288 		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
   1289 			printf("%s: wm_tx_cksum: mbuf allocation failed, "
   1290 			    "packet dropped\n", sc->sc_dev.dv_xname);
   1291 			return (ENOMEM);
   1292 		}
   1293 		m0 = txs->txs_mbuf;
   1294 	}
   1295 
   1296 	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
   1297 	iphl = ip->ip_hl << 2;
   1298 
   1299 	/*
   1300 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   1301 	 * offload feature, if we load the context descriptor, we
   1302 	 * MUST provide valid values for IPCSS and TUCSS fields.
   1303 	 */
   1304 
   1305 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   1306 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   1307 		fields |= htole32(WTX_IXSM);
   1308 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
   1309 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   1310 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
   1311 	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
   1312 		/* Use the cached value. */
   1313 		ipcs = sc->sc_txctx_ipcs;
   1314 	} else {
   1315 		/* Just initialize it to the likely value anyway. */
   1316 		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
   1317 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   1318 		    WTX_TCPIP_IPCSE(offset + iphl - 1));
   1319 	}
   1320 
   1321 	offset += iphl;
   1322 
   1323 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1324 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   1325 		fields |= htole32(WTX_TXSM);
   1326 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
   1327 		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
   1328 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
   1329 	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
   1330 		/* Use the cached value. */
   1331 		tucs = sc->sc_txctx_tucs;
   1332 	} else {
   1333 		/* Just initialize it to a valid TCP context. */
   1334 		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
   1335 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   1336 		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
   1337 	}
   1338 
   1339 	if (sc->sc_txctx_ipcs == ipcs &&
   1340 	    sc->sc_txctx_tucs == tucs) {
   1341 		/* Cached context is fine. */
   1342 		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
   1343 	} else {
   1344 		/* Fill in the context descriptor. */
   1345 #ifdef WM_EVENT_COUNTERS
   1346 		if (sc->sc_txctx_ipcs == 0xffffffff &&
   1347 		    sc->sc_txctx_tucs == 0xffffffff)
   1348 			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
   1349 		else
   1350 			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
   1351 #endif
   1352 		t = (struct livengood_tcpip_ctxdesc *)
   1353 		    &sc->sc_txdescs[sc->sc_txnext];
   1354 		t->tcpip_ipcs = ipcs;
   1355 		t->tcpip_tucs = tucs;
   1356 		t->tcpip_cmdlen =
   1357 		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
   1358 		t->tcpip_seg = 0;
   1359 		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
   1360 
   1361 		sc->sc_txctx_ipcs = ipcs;
   1362 		sc->sc_txctx_tucs = tucs;
   1363 
   1364 		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
   1365 		txs->txs_ndesc++;
   1366 	}
   1367 
   1368 	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
   1369 	*fieldsp = fields;
   1370 
   1371 	return (0);
   1372 }
   1373 
   1374 /*
   1375  * wm_start:		[ifnet interface function]
   1376  *
   1377  *	Start packet transmission on the interface.
   1378  */
   1379 static void
   1380 wm_start(struct ifnet *ifp)
   1381 {
   1382 	struct wm_softc *sc = ifp->if_softc;
   1383 	struct mbuf *m0;
   1384 #if 0 /* XXXJRT */
   1385 	struct m_tag *mtag;
   1386 #endif
   1387 	struct wm_txsoft *txs;
   1388 	bus_dmamap_t dmamap;
   1389 	int error, nexttx, lasttx = -1, ofree, seg;
   1390 	uint32_t cksumcmd, cksumfields;
   1391 
   1392 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   1393 		return;
   1394 
   1395 	/*
   1396 	 * Remember the previous number of free descriptors.
   1397 	 */
   1398 	ofree = sc->sc_txfree;
   1399 
   1400 	/*
   1401 	 * Loop through the send queue, setting up transmit descriptors
   1402 	 * until we drain the queue, or use up all available transmit
   1403 	 * descriptors.
   1404 	 */
   1405 	for (;;) {
   1406 		/* Grab a packet off the queue. */
   1407 		IFQ_POLL(&ifp->if_snd, m0);
   1408 		if (m0 == NULL)
   1409 			break;
   1410 
   1411 		DPRINTF(WM_DEBUG_TX,
   1412 		    ("%s: TX: have packet to transmit: %p\n",
   1413 		    sc->sc_dev.dv_xname, m0));
   1414 
   1415 		/* Get a work queue entry. */
   1416 		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
   1417 			wm_txintr(sc);
   1418 			if (sc->sc_txsfree == 0) {
   1419 				DPRINTF(WM_DEBUG_TX,
   1420 				    ("%s: TX: no free job descriptors\n",
   1421 					sc->sc_dev.dv_xname));
   1422 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   1423 				break;
   1424 			}
   1425 		}
   1426 
   1427 		txs = &sc->sc_txsoft[sc->sc_txsnext];
   1428 		dmamap = txs->txs_dmamap;
   1429 
   1430 		/*
   1431 		 * Load the DMA map.  If this fails, the packet either
   1432 		 * didn't fit in the allotted number of segments, or we
   1433 		 * were short on resources.  For the too-many-segments
   1434 		 * case, we simply report an error and drop the packet,
   1435 		 * since we can't sanely copy a jumbo packet to a single
   1436 		 * buffer.
   1437 		 */
   1438 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   1439 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1440 		if (error) {
   1441 			if (error == EFBIG) {
   1442 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   1443 				printf("%s: Tx packet consumes too many "
   1444 				    "DMA segments, dropping...\n",
   1445 				    sc->sc_dev.dv_xname);
   1446 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   1447 				m_freem(m0);
   1448 				continue;
   1449 			}
   1450 			/*
   1451 			 * Short on resources, just stop for now.
   1452 			 */
   1453 			DPRINTF(WM_DEBUG_TX,
   1454 			    ("%s: TX: dmamap load failed: %d\n",
   1455 			    sc->sc_dev.dv_xname, error));
   1456 			break;
   1457 		}
   1458 
   1459 		/*
   1460 		 * Ensure we have enough descriptors free to describe
   1461 		 * the packet.  Note, we always reserve one descriptor
   1462 		 * at the end of the ring due to the semantics of the
   1463 		 * TDT register, plus one more in the event we need
   1464 		 * to re-load checksum offload context.
   1465 		 */
   1466 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
   1467 			/*
   1468 			 * Not enough free descriptors to transmit this
   1469 			 * packet.  We haven't committed anything yet,
   1470 			 * so just unload the DMA map, put the packet
   1471 			 * pack on the queue, and punt.  Notify the upper
   1472 			 * layer that there are no more slots left.
   1473 			 */
   1474 			DPRINTF(WM_DEBUG_TX,
   1475 			    ("%s: TX: need %d descriptors, have %d\n",
   1476 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
   1477 			    sc->sc_txfree - 1));
   1478 			ifp->if_flags |= IFF_OACTIVE;
   1479 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   1480 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   1481 			break;
   1482 		}
   1483 
   1484 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   1485 
   1486 		/*
   1487 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   1488 		 */
   1489 
   1490 		/* Sync the DMA map. */
   1491 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   1492 		    BUS_DMASYNC_PREWRITE);
   1493 
   1494 		DPRINTF(WM_DEBUG_TX,
   1495 		    ("%s: TX: packet has %d DMA segments\n",
   1496 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
   1497 
   1498 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   1499 
   1500 		/*
   1501 		 * Store a pointer to the packet so that we can free it
   1502 		 * later.
   1503 		 *
   1504 		 * Initially, we consider the number of descriptors the
   1505 		 * packet uses the number of DMA segments.  This may be
   1506 		 * incremented by 1 if we do checksum offload (a descriptor
   1507 		 * is used to set the checksum context).
   1508 		 */
   1509 		txs->txs_mbuf = m0;
   1510 		txs->txs_firstdesc = sc->sc_txnext;
   1511 		txs->txs_ndesc = dmamap->dm_nsegs;
   1512 
   1513 		/*
   1514 		 * Set up checksum offload parameters for
   1515 		 * this packet.
   1516 		 */
   1517 		if (m0->m_pkthdr.csum_flags &
   1518 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
   1519 			if (wm_tx_cksum(sc, txs, &cksumcmd,
   1520 					&cksumfields) != 0) {
   1521 				/* Error message already displayed. */
   1522 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   1523 				continue;
   1524 			}
   1525 		} else {
   1526 			cksumcmd = 0;
   1527 			cksumfields = 0;
   1528 		}
   1529 
   1530 		cksumcmd |= htole32(WTX_CMD_IDE);
   1531 
   1532 		/*
   1533 		 * Initialize the transmit descriptor.
   1534 		 */
   1535 		for (nexttx = sc->sc_txnext, seg = 0;
   1536 		     seg < dmamap->dm_nsegs;
   1537 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
   1538 			/*
   1539 			 * Note: we currently only use 32-bit DMA
   1540 			 * addresses.
   1541 			 */
   1542 			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
   1543 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
   1544 			    htole32(dmamap->dm_segs[seg].ds_addr);
   1545 			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
   1546 			    htole32(dmamap->dm_segs[seg].ds_len);
   1547 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
   1548 			    cksumfields;
   1549 			lasttx = nexttx;
   1550 
   1551 			DPRINTF(WM_DEBUG_TX,
   1552 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
   1553 			    sc->sc_dev.dv_xname, nexttx,
   1554 			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
   1555 			    (uint32_t) dmamap->dm_segs[seg].ds_len));
   1556 		}
   1557 
   1558 		KASSERT(lasttx != -1);
   1559 
   1560 		/*
   1561 		 * Set up the command byte on the last descriptor of
   1562 		 * the packet.  If we're in the interrupt delay window,
   1563 		 * delay the interrupt.
   1564 		 */
   1565 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1566 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
   1567 
   1568 #if 0 /* XXXJRT */
   1569 		/*
   1570 		 * If VLANs are enabled and the packet has a VLAN tag, set
   1571 		 * up the descriptor to encapsulate the packet for us.
   1572 		 *
   1573 		 * This is only valid on the last descriptor of the packet.
   1574 		 */
   1575 		if (sc->sc_ethercom.ec_nvlans != 0 &&
   1576 		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
   1577 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
   1578 			    htole32(WTX_CMD_VLE);
   1579 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
   1580 			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
   1581 		}
   1582 #endif /* XXXJRT */
   1583 
   1584 		txs->txs_lastdesc = lasttx;
   1585 
   1586 		DPRINTF(WM_DEBUG_TX,
   1587 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
   1588 		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
   1589 
   1590 		/* Sync the descriptors we're using. */
   1591 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
   1592 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1593 
   1594 		/* Give the packet to the chip. */
   1595 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
   1596 
   1597 		DPRINTF(WM_DEBUG_TX,
   1598 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
   1599 
   1600 		DPRINTF(WM_DEBUG_TX,
   1601 		    ("%s: TX: finished transmitting packet, job %d\n",
   1602 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
   1603 
   1604 		/* Advance the tx pointer. */
   1605 		sc->sc_txfree -= txs->txs_ndesc;
   1606 		sc->sc_txnext = nexttx;
   1607 
   1608 		sc->sc_txsfree--;
   1609 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
   1610 
   1611 #if NBPFILTER > 0
   1612 		/* Pass the packet to any BPF listeners. */
   1613 		if (ifp->if_bpf)
   1614 			bpf_mtap(ifp->if_bpf, m0);
   1615 #endif /* NBPFILTER > 0 */
   1616 	}
   1617 
   1618 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
   1619 		/* No more slots; notify upper layer. */
   1620 		ifp->if_flags |= IFF_OACTIVE;
   1621 	}
   1622 
   1623 	if (sc->sc_txfree != ofree) {
   1624 		/* Set a watchdog timer in case the chip flakes out. */
   1625 		ifp->if_timer = 5;
   1626 	}
   1627 }
   1628 
   1629 /*
   1630  * wm_watchdog:		[ifnet interface function]
   1631  *
   1632  *	Watchdog timer handler.
   1633  */
   1634 static void
   1635 wm_watchdog(struct ifnet *ifp)
   1636 {
   1637 	struct wm_softc *sc = ifp->if_softc;
   1638 
   1639 	/*
   1640 	 * Since we're using delayed interrupts, sweep up
   1641 	 * before we report an error.
   1642 	 */
   1643 	wm_txintr(sc);
   1644 
   1645 	if (sc->sc_txfree != WM_NTXDESC) {
   1646 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1647 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1648 		    sc->sc_txnext);
   1649 		ifp->if_oerrors++;
   1650 
   1651 		/* Reset the interface. */
   1652 		(void) wm_init(ifp);
   1653 	}
   1654 
   1655 	/* Try to get more packets going. */
   1656 	wm_start(ifp);
   1657 }
   1658 
   1659 /*
   1660  * wm_ioctl:		[ifnet interface function]
   1661  *
   1662  *	Handle control requests from the operator.
   1663  */
   1664 static int
   1665 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1666 {
   1667 	struct wm_softc *sc = ifp->if_softc;
   1668 	struct ifreq *ifr = (struct ifreq *) data;
   1669 	int s, error;
   1670 
   1671 	s = splnet();
   1672 
   1673 	switch (cmd) {
   1674 	case SIOCSIFMEDIA:
   1675 	case SIOCGIFMEDIA:
   1676 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1677 		break;
   1678 	default:
   1679 		error = ether_ioctl(ifp, cmd, data);
   1680 		if (error == ENETRESET) {
   1681 			/*
   1682 			 * Multicast list has changed; set the hardware filter
   1683 			 * accordingly.
   1684 			 */
   1685 			wm_set_filter(sc);
   1686 			error = 0;
   1687 		}
   1688 		break;
   1689 	}
   1690 
   1691 	/* Try to get more packets going. */
   1692 	wm_start(ifp);
   1693 
   1694 	splx(s);
   1695 	return (error);
   1696 }
   1697 
   1698 /*
   1699  * wm_intr:
   1700  *
   1701  *	Interrupt service routine.
   1702  */
   1703 static int
   1704 wm_intr(void *arg)
   1705 {
   1706 	struct wm_softc *sc = arg;
   1707 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1708 	uint32_t icr;
   1709 	int wantinit, handled = 0;
   1710 
   1711 	for (wantinit = 0; wantinit == 0;) {
   1712 		icr = CSR_READ(sc, WMREG_ICR);
   1713 		if ((icr & sc->sc_icr) == 0)
   1714 			break;
   1715 
   1716 #if 0 /*NRND > 0*/
   1717 		if (RND_ENABLED(&sc->rnd_source))
   1718 			rnd_add_uint32(&sc->rnd_source, icr);
   1719 #endif
   1720 
   1721 		handled = 1;
   1722 
   1723 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1724 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   1725 			DPRINTF(WM_DEBUG_RX,
   1726 			    ("%s: RX: got Rx intr 0x%08x\n",
   1727 			    sc->sc_dev.dv_xname,
   1728 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   1729 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   1730 		}
   1731 #endif
   1732 		wm_rxintr(sc);
   1733 
   1734 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   1735 		if (icr & ICR_TXDW) {
   1736 			DPRINTF(WM_DEBUG_TX,
   1737 			    ("%s: TX: got TDXW interrupt\n",
   1738 			    sc->sc_dev.dv_xname));
   1739 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   1740 		}
   1741 #endif
   1742 		wm_txintr(sc);
   1743 
   1744 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
   1745 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   1746 			wm_linkintr(sc, icr);
   1747 		}
   1748 
   1749 		if (icr & ICR_RXO) {
   1750 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
   1751 			wantinit = 1;
   1752 		}
   1753 	}
   1754 
   1755 	if (handled) {
   1756 		if (wantinit)
   1757 			wm_init(ifp);
   1758 
   1759 		/* Try to get more packets going. */
   1760 		wm_start(ifp);
   1761 	}
   1762 
   1763 	return (handled);
   1764 }
   1765 
   1766 /*
   1767  * wm_txintr:
   1768  *
   1769  *	Helper; handle transmit interrupts.
   1770  */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* Descriptors are being reclaimed; allow queueing again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * NOTE(review): the 32-bit descriptor field is truncated
		 * to 8 bits by the assignment to "status"; presumably all
		 * the WTX_ST_* bits tested below live in the low byte --
		 * confirm against the descriptor layout.
		 */
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			/* Job not done yet; hand the descriptor back. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Account for collisions / errors reported by the chip. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				/* Excessive collisions counted as 16. */
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Reclaim the descriptors, unload the DMA map, free mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}
   1852 
   1853 /*
   1854  * wm_rxintr:
   1855  *
   1856  *	Helper; handle receive interrupts.
   1857  */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the Rx ring from where we last stopped.  A packet that
	 * spans several descriptors is accumulated on the
	 * sc_rxhead/sc_rxtailp chain until the descriptor carrying
	 * WRX_ST_EOP (end of packet) is reached.
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * Discard mode is entered when a replacement buffer could
		 * not be allocated mid-packet; just recycle descriptors
		 * until the end of that packet goes by.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* The filled buffer we are about to take off the ring. */
		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the packet under assembly. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			/* Chip verified the IPv4 header checksum. */
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   2057 
   2058 /*
   2059  * wm_linkintr:
   2060  *
   2061  *	Helper; handle link interrupts.
   2062  */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/* From here on: TBI (no MII) link handling. */

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		/* Read link-up from the status register. */
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Update collision distance for the new duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		/* Reflect the new state on the link LED. */
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   2124 
   2125 /*
   2126  * wm_tick:
   2127  *
   2128  *	One second timer, used to check link status, sweep up
   2129  *	completed transmit jobs, etc.
   2130  */
   2131 static void
   2132 wm_tick(void *arg)
   2133 {
   2134 	struct wm_softc *sc = arg;
   2135 	int s;
   2136 
   2137 	s = splnet();
   2138 
   2139 	if (sc->sc_flags & WM_F_HAS_MII)
   2140 		mii_tick(&sc->sc_mii);
   2141 	else
   2142 		wm_tbi_check_link(sc);
   2143 
   2144 	splx(s);
   2145 
   2146 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2147 }
   2148 
   2149 /*
   2150  * wm_reset:
   2151  *
   2152  *	Reset the i82542 chip.
   2153  */
static void
wm_reset(struct wm_softc *sc)
{
	int i;

	switch (sc->sc_type) {
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82541:
	case WM_T_82541_2:
		/*
		 * These chips have a problem with the memory-mapped
		 * write cycle when issuing the reset, so use I/O-mapped
		 * access, if possible.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;

	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;

	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	}
	/* Give the reset time to take hold before touching the chip. */
	delay(10000);

	/* Wait up to a further 20ms for the RST bit to self-clear. */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
   2200 
   2201 /*
   2202  * wm_init:		[ifnet interface function]
   2203  *
   2204  *	Initialize the interface.  Must be called at splnet().
   2205  */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Invalidate the cached Tx checksum contexts; all-ones
	 * presumably never matches a real cached value -- confirm
	 * against the Tx start path.
	 */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the Tx ring base address, length, head/tail, and
	 * interrupt delay.  Pre-82543 chips use the old register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		/* Descriptor prefetch/writeback thresholds. */
		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second Rx ring is unused; zero it out. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	/* Populate the Rx ring, reusing buffers where they exist. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	/* TCP/UDP offload requires IP offload to be on as well. */
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* TBI (non-MII) interfaces also watch for /C/ ordered sets. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	if(MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
	/*
	 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
	 * XXX segments, dropping" -- why?
	 */
#if 0
		if(sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
#else
		panic("wm_init: MCLBYTES > 2048 not supported.");
#endif
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
   2472 
   2473 /*
   2474  * wm_rxdrain:
   2475  *
   2476  *	Drain the receive queue.
   2477  */
   2478 static void
   2479 wm_rxdrain(struct wm_softc *sc)
   2480 {
   2481 	struct wm_rxsoft *rxs;
   2482 	int i;
   2483 
   2484 	for (i = 0; i < WM_NRXDESC; i++) {
   2485 		rxs = &sc->sc_rxsoft[i];
   2486 		if (rxs->rxs_mbuf != NULL) {
   2487 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2488 			m_freem(rxs->rxs_mbuf);
   2489 			rxs->rxs_mbuf = NULL;
   2490 		}
   2491 	}
   2492 }
   2493 
   2494 /*
   2495  * wm_stop:		[ifnet interface function]
   2496  *
   2497  *	Stop transmission on the interface.
   2498  */
   2499 static void
   2500 wm_stop(struct ifnet *ifp, int disable)
   2501 {
   2502 	struct wm_softc *sc = ifp->if_softc;
   2503 	struct wm_txsoft *txs;
   2504 	int i;
   2505 
   2506 	/* Stop the one second clock. */
   2507 	callout_stop(&sc->sc_tick_ch);
   2508 
   2509 	if (sc->sc_flags & WM_F_HAS_MII) {
   2510 		/* Down the MII. */
   2511 		mii_down(&sc->sc_mii);
   2512 	}
   2513 
   2514 	/* Stop the transmit and receive processes. */
   2515 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2516 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2517 
   2518 	/* Release any queued transmit buffers. */
   2519 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2520 		txs = &sc->sc_txsoft[i];
   2521 		if (txs->txs_mbuf != NULL) {
   2522 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2523 			m_freem(txs->txs_mbuf);
   2524 			txs->txs_mbuf = NULL;
   2525 		}
   2526 	}
   2527 
   2528 	if (disable)
   2529 		wm_rxdrain(sc);
   2530 
   2531 	/* Mark the interface as down and cancel the watchdog timer. */
   2532 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2533 	ifp->if_timer = 0;
   2534 }
   2535 
   2536 /*
   2537  * wm_acquire_eeprom:
   2538  *
   2539  *	Perform the EEPROM handshake required on some chips.
   2540  */
   2541 static int
   2542 wm_acquire_eeprom(struct wm_softc *sc)
   2543 {
   2544 	uint32_t reg;
   2545 	int x;
   2546 
   2547 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
   2548 		reg = CSR_READ(sc, WMREG_EECD);
   2549 
   2550 		/* Request EEPROM access. */
   2551 		reg |= EECD_EE_REQ;
   2552 		CSR_WRITE(sc, WMREG_EECD, reg);
   2553 
   2554 		/* ..and wait for it to be granted. */
   2555 		for (x = 0; x < 100; x++) {
   2556 			reg = CSR_READ(sc, WMREG_EECD);
   2557 			if (reg & EECD_EE_GNT)
   2558 				break;
   2559 			delay(5);
   2560 		}
   2561 		if ((reg & EECD_EE_GNT) == 0) {
   2562 			aprint_error("%s: could not acquire EEPROM GNT\n",
   2563 			    sc->sc_dev.dv_xname);
   2564 			reg &= ~EECD_EE_REQ;
   2565 			CSR_WRITE(sc, WMREG_EECD, reg);
   2566 			return (1);
   2567 		}
   2568 	}
   2569 
   2570 	return (0);
   2571 }
   2572 
   2573 /*
   2574  * wm_release_eeprom:
   2575  *
   2576  *	Release the EEPROM mutex.
   2577  */
   2578 static void
   2579 wm_release_eeprom(struct wm_softc *sc)
   2580 {
   2581 	uint32_t reg;
   2582 
   2583 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2584 		reg = CSR_READ(sc, WMREG_EECD);
   2585 		reg &= ~EECD_EE_REQ;
   2586 		CSR_WRITE(sc, WMREG_EECD, reg);
   2587 	}
   2588 }
   2589 
   2590 /*
   2591  * wm_eeprom_sendbits:
   2592  *
   2593  *	Send a series of bits to the EEPROM.
   2594  */
   2595 static void
   2596 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   2597 {
   2598 	uint32_t reg;
   2599 	int x;
   2600 
   2601 	reg = CSR_READ(sc, WMREG_EECD);
   2602 
   2603 	for (x = nbits; x > 0; x--) {
   2604 		if (bits & (1U << (x - 1)))
   2605 			reg |= EECD_DI;
   2606 		else
   2607 			reg &= ~EECD_DI;
   2608 		CSR_WRITE(sc, WMREG_EECD, reg);
   2609 		delay(2);
   2610 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2611 		delay(2);
   2612 		CSR_WRITE(sc, WMREG_EECD, reg);
   2613 		delay(2);
   2614 	}
   2615 }
   2616 
   2617 /*
   2618  * wm_eeprom_recvbits:
   2619  *
   2620  *	Receive a series of bits from the EEPROM.
   2621  */
   2622 static void
   2623 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   2624 {
   2625 	uint32_t reg, val;
   2626 	int x;
   2627 
   2628 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   2629 
   2630 	val = 0;
   2631 	for (x = nbits; x > 0; x--) {
   2632 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2633 		delay(2);
   2634 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   2635 			val |= (1U << (x - 1));
   2636 		CSR_WRITE(sc, WMREG_EECD, reg);
   2637 		delay(2);
   2638 	}
   2639 	*valp = val;
   2640 }
   2641 
   2642 /*
   2643  * wm_read_eeprom_uwire:
   2644  *
   2645  *	Read a word from the EEPROM using the MicroWire protocol.
   2646  */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* One full chip-select cycle per word read. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	/* MicroWire reads have no failure mode once we hold the bus. */
	return (0);
}
   2681 
   2682 /*
   2683  * wm_spi_eeprom_ready:
   2684  *
   2685  *	Wait for a SPI EEPROM to be ready for commands.
   2686  */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	/*
	 * Poll the status register (RDSR opcode) until the busy/ready
	 * bit clears, giving up after SPI_MAX_RETRIES microseconds.
	 * Note the 5us delay happens in the loop's increment clause,
	 * i.e. only after a poll that found the part still busy.
	 */
	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error("%s: EEPROM failed to become ready\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}
   2706 
   2707 /*
   2708  * wm_read_eeprom_spi:
   2709  *
 *	Read a word from the EEPROM using the SPI protocol.
   2711  */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/*
	 * 8-bit-address parts carry the 9th address bit (word >= 128)
	 * in the opcode itself.
	 */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* The part is byte-addressed, hence word << 1. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	/*
	 * Read wordcnt consecutive words without re-addressing;
	 * presumably the part auto-increments its address during a
	 * sequential read -- swap each word to host byte order.
	 */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
   2752 
   2753 /*
   2754  * wm_read_eeprom:
   2755  *
   2756  *	Read data from the serial EEPROM.
   2757  */
   2758 static int
   2759 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2760 {
   2761 	int rv;
   2762 
   2763 	if (wm_acquire_eeprom(sc))
   2764 		return (1);
   2765 
   2766 	if (sc->sc_flags & WM_F_EEPROM_SPI)
   2767 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
   2768 	else
   2769 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
   2770 
   2771 	wm_release_eeprom(sc);
   2772 	return (rv);
   2773 }
   2774 
   2775 /*
   2776  * wm_add_rxbuf:
   2777  *
 *	Add a receive buffer to the indicated descriptor.
   2779  */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header and attach a cluster for the packet data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster available; give back the header. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any previous mapping before reusing this DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the entire cluster; the chip DMAs received frames into it. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX A map-load failure here is currently fatal. */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Point the hardware receive descriptor at the new buffer. */
	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   2818 
   2819 /*
   2820  * wm_set_ral:
   2821  *
 *	Set an entry in the receive address list.
   2823  */
   2824 static void
   2825 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2826 {
   2827 	uint32_t ral_lo, ral_hi;
   2828 
   2829 	if (enaddr != NULL) {
   2830 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2831 		    (enaddr[3] << 24);
   2832 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2833 		ral_hi |= RAL_AV;
   2834 	} else {
   2835 		ral_lo = 0;
   2836 		ral_hi = 0;
   2837 	}
   2838 
   2839 	if (sc->sc_type >= WM_T_82544) {
   2840 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2841 		    ral_lo);
   2842 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2843 		    ral_hi);
   2844 	} else {
   2845 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2846 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2847 	}
   2848 }
   2849 
   2850 /*
   2851  * wm_mchash:
   2852  *
   2853  *	Compute the hash of the multicast address for the 4096-bit
   2854  *	multicast filter.
   2855  */
   2856 static uint32_t
   2857 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2858 {
   2859 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2860 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2861 	uint32_t hash;
   2862 
   2863 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2864 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2865 
   2866 	return (hash & 0xfff);
   2867 }
   2868 
   2869 /*
   2870  * wm_set_filter:
   2871  *
   2872  *	Set up the receive filter.
   2873  */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	/* 82544 and later moved the multicast table to the Cordova block. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous implies receiving all multicasts as well. */
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	/* Walk the multicast list and set a filter bit for each address. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the 12-bit hash into a 32-bit word index and bit. */
		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??  On the 82544, writing certain MTA
		 * words appears to clobber the preceding word, so rewrite
		 * the neighbor after updating this slot.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	/* Push the updated receive-control settings to the chip. */
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   2954 
   2955 /*
   2956  * wm_tbi_mediainit:
   2957  *
   2958  *	Initialize media for use on 1000BASE-X devices.
   2959  */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Select the transmit inter-packet gap for this chip generation. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/*
 * Add one media entry and print its name, comma-separating the list.
 * `dd' (the ANAR_X_* ability bits) is stashed in ifm_data for use by
 * wm_tbi_mediachange() when building TXCW.
 */
#define	ADD(ss, mm, dd)							\
do {									\
	printf("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   3001 
   3002 /*
   3003  * wm_tbi_mediastatus:	[ifmedia interface function]
   3004  *
   3005  *	Get the current interface media status on a 1000BASE-X device.
   3006  */
   3007 static void
   3008 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3009 {
   3010 	struct wm_softc *sc = ifp->if_softc;
   3011 
   3012 	ifmr->ifm_status = IFM_AVALID;
   3013 	ifmr->ifm_active = IFM_ETHER;
   3014 
   3015 	if (sc->sc_tbi_linkup == 0) {
   3016 		ifmr->ifm_active |= IFM_NONE;
   3017 		return;
   3018 	}
   3019 
   3020 	ifmr->ifm_status |= IFM_ACTIVE;
   3021 	ifmr->ifm_active |= IFM_1000_SX;
   3022 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   3023 		ifmr->ifm_active |= IFM_FDX;
   3024 }
   3025 
   3026 /*
   3027  * wm_tbi_mediachange:	[ifmedia interface function]
   3028  *
   3029  *	Set hardware to newly-selected media on a 1000BASE-X device.
   3030  */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit config word: the ability bits stashed in
	 * ifm_data by wm_tbi_mediainit(), plus pause bits if flow
	 * control is enabled, plus autonegotiation enable.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal; clear means we have signal. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; set the collision distance for duplex. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No signal at all; report link down. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
   3092 
   3093 /*
   3094  * wm_tbi_set_linkled:
   3095  *
   3096  *	Update the link LED on 1000BASE-X devices.
   3097  */
   3098 static void
   3099 wm_tbi_set_linkled(struct wm_softc *sc)
   3100 {
   3101 
   3102 	if (sc->sc_tbi_linkup)
   3103 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   3104 	else
   3105 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   3106 
   3107 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3108 }
   3109 
   3110 /*
   3111  * wm_tbi_check_link:
   3112  *
   3113  *	Check the link on 1000BASE-X devices.
   3114  */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate is a countdown that defers link checks while
	 * autonegotiation may still be settling: 0 means idle, >1 means
	 * keep waiting, 1 means check now.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Link up: set the collision distance to match the duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
   3157 
   3158 /*
   3159  * wm_gmii_reset:
   3160  *
   3161  *	Reset the PHY.
   3162  */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544 and later have a dedicated PHY reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		/* Drive SWD pin 4 as an output. */
		reg |= CTRL_EXT_SWDPIO(4);

		/* Pulse the pin high-low-high to reset the PHY. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
   3194 
   3195 /*
   3196  * wm_gmii_mediainit:
   3197  *
   3198  *	Initialize media for use on 1000BASE-T devices.
   3199  */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/*
	 * 82544 and later have an internal MDI controller (MDIC
	 * register); older parts bit-bang MDIO through SWD pins.
	 */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	/* Reset the PHY before attaching, so probing sees a sane device. */
	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer only a "none" media choice. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   3242 
   3243 /*
   3244  * wm_gmii_mediastatus:	[ifmedia interface function]
   3245  *
   3246  *	Get the current interface media status on a 1000BASE-T device.
   3247  */
   3248 static void
   3249 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   3250 {
   3251 	struct wm_softc *sc = ifp->if_softc;
   3252 
   3253 	mii_pollstat(&sc->sc_mii);
   3254 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   3255 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   3256 }
   3257 
   3258 /*
   3259  * wm_gmii_mediachange:	[ifmedia interface function]
   3260  *
   3261  *	Set hardware to newly-selected media on a 1000BASE-T device.
   3262  */
   3263 static int
   3264 wm_gmii_mediachange(struct ifnet *ifp)
   3265 {
   3266 	struct wm_softc *sc = ifp->if_softc;
   3267 
   3268 	if (ifp->if_flags & IFF_UP)
   3269 		mii_mediachg(&sc->sc_mii);
   3270 	return (0);
   3271 }
   3272 
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Clock out `nbits' bits of `data', MSB first, on the bit-banged
 *	MDIO interface (SWD pins 2/3) of the i82543.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure the data and clock pins as outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		/* Set the data pin, then pulse the clock. */
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
   3299 
/*
 * i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value, MSB first, from the bit-banged MDIO
 *	interface of the i82543.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Clock pin is an output; the data pin floats as an input. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One turnaround clock cycle before sampling data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Shift in 16 data bits, sampling while the clock is high. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* Trailing clock cycle to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
   3337 
   3338 /*
   3339  * wm_gmii_i82543_readreg:	[mii interface function]
   3340  *
   3341  *	Read a PHY register on the GMII (i82543 version).
   3342  */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	/* 32-bit preamble of ones, then start/read/phy/reg (14 bits). */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	/* The PHY answers with 16 data bits. */
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
   3360 
   3361 /*
   3362  * wm_gmii_i82543_writereg:	[mii interface function]
   3363  *
   3364  *	Write a PHY register on the GMII (i82543 version).
   3365  */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/*
	 * 32-bit preamble of ones, then the complete 32-bit write frame:
	 * start/write/phy/reg/turnaround followed by the 16 data bits.
	 */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
   3376 
   3377 /*
   3378  * wm_gmii_i82544_readreg:	[mii interface function]
   3379  *
   3380  *	Read a PHY register on the GMII.
   3381  */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	/* Post the read command to the internal MDI controller. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion: up to 100 * 10us = 1ms. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads back when no device responds; treat as 0. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
   3417 
   3418 /*
   3419  * wm_gmii_i82544_writereg:	[mii interface function]
   3420  *
   3421  *	Write a PHY register on the GMII.
   3422  */
   3423 static void
   3424 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
   3425 {
   3426 	struct wm_softc *sc = (void *) self;
   3427 	uint32_t mdic = 0;
   3428 	int i;
   3429 
   3430 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   3431 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   3432 
   3433 	for (i = 0; i < 100; i++) {
   3434 		mdic = CSR_READ(sc, WMREG_MDIC);
   3435 		if (mdic & MDIC_READY)
   3436 			break;
   3437 		delay(10);
   3438 	}
   3439 
   3440 	if ((mdic & MDIC_READY) == 0)
   3441 		printf("%s: MDIC write timed out: phy %d reg %d\n",
   3442 		    sc->sc_dev.dv_xname, phy, reg);
   3443 	else if (mdic & MDIC_E)
   3444 		printf("%s: MDIC write error: phy %d reg %d\n",
   3445 		    sc->sc_dev.dv_xname, phy, reg);
   3446 }
   3447 
   3448 /*
   3449  * wm_gmii_statchg:	[mii interface function]
   3450  *
   3451  *	Callback from MII layer when media changes.
   3452  */
   3453 static void
   3454 wm_gmii_statchg(struct device *self)
   3455 {
   3456 	struct wm_softc *sc = (void *) self;
   3457 
   3458 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   3459 
   3460 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   3461 		DPRINTF(WM_DEBUG_LINK,
   3462 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
   3463 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   3464 	} else  {
   3465 		DPRINTF(WM_DEBUG_LINK,
   3466 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
   3467 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   3468 	}
   3469 
   3470 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   3471 }
   3472