Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.20
      1 /*	$NetBSD: if_wm.c,v 1.20 2002/08/17 20:58:04 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     40  *
     41  * TODO (in order of importance):
     42  *
     43  *	- Make GMII work on the i82543.
     44  *
     45  *	- Fix hw VLAN assist.
     46  *
     47  *	- Jumbo frames -- requires changes to network stack due to
     48  *	  lame buffer length handling on chip.
     49  */
     50 
     51 #include "bpfilter.h"
     52 
     53 #include <sys/param.h>
     54 #include <sys/systm.h>
     55 #include <sys/callout.h>
     56 #include <sys/mbuf.h>
     57 #include <sys/malloc.h>
     58 #include <sys/kernel.h>
     59 #include <sys/socket.h>
     60 #include <sys/ioctl.h>
     61 #include <sys/errno.h>
     62 #include <sys/device.h>
     63 #include <sys/queue.h>
     64 
     65 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
     66 
     67 #include <net/if.h>
     68 #include <net/if_dl.h>
     69 #include <net/if_media.h>
     70 #include <net/if_ether.h>
     71 
     72 #if NBPFILTER > 0
     73 #include <net/bpf.h>
     74 #endif
     75 
     76 #include <netinet/in.h>			/* XXX for struct ip */
     77 #include <netinet/in_systm.h>		/* XXX for struct ip */
     78 #include <netinet/ip.h>			/* XXX for struct ip */
     79 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
     80 
     81 #include <machine/bus.h>
     82 #include <machine/intr.h>
     83 #include <machine/endian.h>
     84 
     85 #include <dev/mii/mii.h>
     86 #include <dev/mii/miivar.h>
     87 #include <dev/mii/mii_bitbang.h>
     88 
     89 #include <dev/pci/pcireg.h>
     90 #include <dev/pci/pcivar.h>
     91 #include <dev/pci/pcidevs.h>
     92 
     93 #include <dev/pci/if_wmreg.h>
     94 
     95 #ifdef WM_DEBUG
     96 #define	WM_DEBUG_LINK		0x01
     97 #define	WM_DEBUG_TX		0x02
     98 #define	WM_DEBUG_RX		0x04
     99 #define	WM_DEBUG_GMII		0x08
    100 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
    101 
    102 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    103 #else
    104 #define	DPRINTF(x, y)	/* nothing */
    105 #endif /* WM_DEBUG */
    106 
    107 /*
    108  * Transmit descriptor list size.  Due to errata, we can only have
    109  * 256 hardware descriptors in the ring.  We tell the upper layers
    110  * that they can queue a lot of packets, and we go ahead and manage
    111  * up to 64 of them at a time.  We allow up to 16 DMA segments per
    112  * packet.
    113  */
    114 #define	WM_NTXSEGS		16
    115 #define	WM_IFQUEUELEN		256
    116 #define	WM_TXQUEUELEN		64
    117 #define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
    118 #define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
    119 #define	WM_NTXDESC		256
    120 #define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
    121 #define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
    122 #define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
    123 
    124 /*
    125  * Receive descriptor list size.  We have one Rx buffer for normal
    126  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    127  * packet.  We allocate 256 receive descriptors, each with a 2k
    128  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    129  */
    130 #define	WM_NRXDESC		256
    131 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    132 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    133 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    134 
    135 /*
    136  * Control structures are DMA'd to the i82542 chip.  We allocate them in
     137  * a single clump that maps to a single DMA segment to make several things
    138  * easier.
    139  */
struct wm_control_data {
	/*
	 * The transmit descriptors.  WM_CDTXOFF() yields the byte
	 * offset of a given descriptor within this structure.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.  WM_CDRXOFF() yields the byte
	 * offset of a given descriptor within this structure.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};
    151 
    152 #define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
    153 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
    154 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
    155 
    156 /*
    157  * Software state for transmit jobs.
    158  */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* index of first descriptor in packet */
	int txs_lastdesc;		/* index of last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used by this job */
};
    166 
    167 /*
    168  * Software state for receive buffers.  Each descriptor gets a
    169  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    170  * more than one buffer, we chain them together.
    171  */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* DMA map for this buffer */
};
    176 
    177 /*
    178  * Software state per device.
    179  */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see WM_T_* below */
	int sc_flags;			/* flags; see WM_F_* below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures (the Tx/Rx descriptor rings;
	 * see struct wm_control_data).
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;		/* nonzero: dropping rest of current Rx
					   packet -- NOTE(review): inferred;
					   confirm against wm_rxintr() */
	int	sc_rxlen;		/* length of Rx chain accumulated so
					   far (zeroed by WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* first mbuf of Rx reassembly chain */
	struct mbuf *sc_rxtail;		/* last mbuf of Rx reassembly chain */
	struct mbuf **sc_rxtailp;	/* append point (tail mbuf's m_next) */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};
    274 
/*
 * WM_RXCHAIN_RESET:
 *
 *	Reset the mbuf chain used to reassemble a multi-buffer Rx
 *	packet: empty the list and zero the accumulated length.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)
    281 
/*
 * WM_RXCHAIN_LINK:
 *
 *	Append mbuf m to the Rx reassembly chain and record it as
 *	the new tail.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
    287 
    288 /* sc_type */
    289 #define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
    290 #define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
    291 #define	WM_T_82543		2	/* i82543 */
    292 #define	WM_T_82544		3	/* i82544 */
    293 #define	WM_T_82540		4	/* i82540 */
    294 #define	WM_T_82545		5	/* i82545 */
    295 #define	WM_T_82546		6	/* i82546 */
    296 
    297 /* sc_flags */
    298 #define	WM_F_HAS_MII		0x01	/* has MII */
    299 #define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
    300 
    301 #ifdef WM_EVENT_COUNTERS
    302 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    303 #else
    304 #define	WM_EVCNT_INCR(ev)	/* nothing */
    305 #endif
    306 
    307 #define	CSR_READ(sc, reg)						\
    308 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    309 #define	CSR_WRITE(sc, reg, val)						\
    310 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    311 
    312 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
    313 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
    314 
/*
 * WM_CDTXSYNC:
 *
 *	Perform a bus_dmamap_sync() of n Tx descriptors starting at
 *	index x.  If the range wraps past the end of the ring, split
 *	the operation into two syncs: end-of-ring first, then the
 *	remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
    335 
/*
 * WM_CDRXSYNC:
 *
 *	Perform a bus_dmamap_sync() of the single Rx descriptor at
 *	index x in the control-data DMA map.
 */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
    341 
/*
 * WM_INIT_RXDESC:
 *
 *	(Re)initialize Rx descriptor x: point it at its mbuf's buffer
 *	(offset by 2 for payload alignment), clear the status fields,
 *	sync the descriptor, and hand it to the chip by writing x to
 *	the RDT register.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
    379 
    380 void	wm_start(struct ifnet *);
    381 void	wm_watchdog(struct ifnet *);
    382 int	wm_ioctl(struct ifnet *, u_long, caddr_t);
    383 int	wm_init(struct ifnet *);
    384 void	wm_stop(struct ifnet *, int);
    385 
    386 void	wm_shutdown(void *);
    387 
    388 void	wm_reset(struct wm_softc *);
    389 void	wm_rxdrain(struct wm_softc *);
    390 int	wm_add_rxbuf(struct wm_softc *, int);
    391 void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
    392 void	wm_tick(void *);
    393 
    394 void	wm_set_filter(struct wm_softc *);
    395 
    396 int	wm_intr(void *);
    397 void	wm_txintr(struct wm_softc *);
    398 void	wm_rxintr(struct wm_softc *);
    399 void	wm_linkintr(struct wm_softc *, uint32_t);
    400 
    401 void	wm_tbi_mediainit(struct wm_softc *);
    402 int	wm_tbi_mediachange(struct ifnet *);
    403 void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    404 
    405 void	wm_tbi_set_linkled(struct wm_softc *);
    406 void	wm_tbi_check_link(struct wm_softc *);
    407 
    408 void	wm_gmii_reset(struct wm_softc *);
    409 
    410 int	wm_gmii_i82543_readreg(struct device *, int, int);
    411 void	wm_gmii_i82543_writereg(struct device *, int, int, int);
    412 
    413 int	wm_gmii_i82544_readreg(struct device *, int, int);
    414 void	wm_gmii_i82544_writereg(struct device *, int, int, int);
    415 
    416 void	wm_gmii_statchg(struct device *);
    417 
    418 void	wm_gmii_mediainit(struct wm_softc *);
    419 int	wm_gmii_mediachange(struct ifnet *);
    420 void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    421 
    422 int	wm_match(struct device *, struct cfdata *, void *);
    423 void	wm_attach(struct device *, struct device *, void *);
    424 
    425 int	wm_copy_small = 0;
    426 
/* Autoconfiguration glue: match/attach entry points for this driver. */
struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};
    430 
    431 /*
    432  * Devices supported by this driver.
    433  */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device name to print */
	int			wmp_type;	/* chip type (WM_T_*) */
	int			wmp_flags;	/* media flags; see below */
#define	WMP_F_1000X		0x01		/* 1000BASE-X media */
#define	WMP_F_1000T		0x02		/* 1000BASE-T media */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	/* Sentinel: wm_lookup() stops at the NULL name. */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
    499 
    500 #ifdef WM_EVENT_COUNTERS
    501 #if WM_NTXSEGS != 16
    502 #error Update wm_txseg_evcnt_names
    503 #endif
/* Names for the "Tx packet used N segments" event counters, N = 1..16. */
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
    522 #endif /* WM_EVENT_COUNTERS */
    523 
    524 static const struct wm_product *
    525 wm_lookup(const struct pci_attach_args *pa)
    526 {
    527 	const struct wm_product *wmp;
    528 
    529 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
    530 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
    531 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
    532 			return (wmp);
    533 	}
    534 	return (NULL);
    535 }
    536 
    537 int
    538 wm_match(struct device *parent, struct cfdata *cf, void *aux)
    539 {
    540 	struct pci_attach_args *pa = aux;
    541 
    542 	if (wm_lookup(pa) != NULL)
    543 		return (1);
    544 
    545 	return (0);
    546 }
    547 
    548 void
    549 wm_attach(struct device *parent, struct device *self, void *aux)
    550 {
    551 	struct wm_softc *sc = (void *) self;
    552 	struct pci_attach_args *pa = aux;
    553 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    554 	pci_chipset_tag_t pc = pa->pa_pc;
    555 	pci_intr_handle_t ih;
    556 	const char *intrstr = NULL;
    557 	bus_space_tag_t memt;
    558 	bus_space_handle_t memh;
    559 	bus_dma_segment_t seg;
    560 	int memh_valid;
    561 	int i, rseg, error;
    562 	const struct wm_product *wmp;
    563 	uint8_t enaddr[ETHER_ADDR_LEN];
    564 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
    565 	pcireg_t preg, memtype;
    566 	int pmreg;
    567 
    568 	callout_init(&sc->sc_tick_ch);
    569 
    570 	wmp = wm_lookup(pa);
    571 	if (wmp == NULL) {
    572 		printf("\n");
    573 		panic("wm_attach: impossible");
    574 	}
    575 
    576 	sc->sc_dmat = pa->pa_dmat;
    577 
    578 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
    579 	printf(": %s, rev. %d\n", wmp->wmp_name, preg);
    580 
    581 	sc->sc_type = wmp->wmp_type;
    582 	if (sc->sc_type < WM_T_82543) {
    583 		if (preg < 2) {
    584 			printf("%s: i82542 must be at least rev. 2\n",
    585 			    sc->sc_dev.dv_xname);
    586 			return;
    587 		}
    588 		if (preg < 3)
    589 			sc->sc_type = WM_T_82542_2_0;
    590 	}
    591 
    592 	/*
    593 	 * Some chips require a handshake to access the EEPROM.
    594 	 */
    595 	if (sc->sc_type >= WM_T_82540)
    596 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
    597 
    598 	/*
    599 	 * Map the device.
    600 	 */
    601 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
    602 	switch (memtype) {
    603 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
    604 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
    605 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
    606 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
    607 		break;
    608 	default:
    609 		memh_valid = 0;
    610 	}
    611 
    612 	if (memh_valid) {
    613 		sc->sc_st = memt;
    614 		sc->sc_sh = memh;
    615 	} else {
    616 		printf("%s: unable to map device registers\n",
    617 		    sc->sc_dev.dv_xname);
    618 		return;
    619 	}
    620 
    621 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
    622 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    623 	preg |= PCI_COMMAND_MASTER_ENABLE;
    624 	if (sc->sc_type < WM_T_82542_2_1)
    625 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
    626 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
    627 
    628 	/* Get it out of power save mode, if needed. */
    629 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
    630 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
    631 		if (preg == 3) {
    632 			/*
    633 			 * The card has lost all configuration data in
    634 			 * this state, so punt.
    635 			 */
    636 			printf("%s: unable to wake from power state D3\n",
    637 			    sc->sc_dev.dv_xname);
    638 			return;
    639 		}
    640 		if (preg != 0) {
    641 			printf("%s: waking up from power state D%d\n",
    642 			    sc->sc_dev.dv_xname, preg);
    643 			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
    644 		}
    645 	}
    646 
    647 	/*
    648 	 * Map and establish our interrupt.
    649 	 */
    650 	if (pci_intr_map(pa, &ih)) {
    651 		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
    652 		return;
    653 	}
    654 	intrstr = pci_intr_string(pc, ih);
    655 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
    656 	if (sc->sc_ih == NULL) {
    657 		printf("%s: unable to establish interrupt",
    658 		    sc->sc_dev.dv_xname);
    659 		if (intrstr != NULL)
    660 			printf(" at %s", intrstr);
    661 		printf("\n");
    662 		return;
    663 	}
    664 	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
    665 
    666 	/*
    667 	 * Allocate the control data structures, and create and load the
    668 	 * DMA map for it.
    669 	 */
    670 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
    671 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
    672 	    0)) != 0) {
    673 		printf("%s: unable to allocate control data, error = %d\n",
    674 		    sc->sc_dev.dv_xname, error);
    675 		goto fail_0;
    676 	}
    677 
    678 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    679 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
    680 	    0)) != 0) {
    681 		printf("%s: unable to map control data, error = %d\n",
    682 		    sc->sc_dev.dv_xname, error);
    683 		goto fail_1;
    684 	}
    685 
    686 	if ((error = bus_dmamap_create(sc->sc_dmat,
    687 	    sizeof(struct wm_control_data), 1,
    688 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
    689 		printf("%s: unable to create control data DMA map, "
    690 		    "error = %d\n", sc->sc_dev.dv_xname, error);
    691 		goto fail_2;
    692 	}
    693 
    694 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    695 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
    696 	    0)) != 0) {
    697 		printf("%s: unable to load control data DMA map, error = %d\n",
    698 		    sc->sc_dev.dv_xname, error);
    699 		goto fail_3;
    700 	}
    701 
    702 	/*
    703 	 * Create the transmit buffer DMA maps.
    704 	 */
    705 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    706 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
    707 		    WM_NTXSEGS, MCLBYTES, 0, 0,
    708 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    709 			printf("%s: unable to create Tx DMA map %d, "
    710 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    711 			goto fail_4;
    712 		}
    713 	}
    714 
    715 	/*
    716 	 * Create the receive buffer DMA maps.
    717 	 */
    718 	for (i = 0; i < WM_NRXDESC; i++) {
    719 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
    720 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    721 			printf("%s: unable to create Rx DMA map %d, "
    722 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
    723 			goto fail_5;
    724 		}
    725 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    726 	}
    727 
    728 	/*
    729 	 * Reset the chip to a known state.
    730 	 */
    731 	wm_reset(sc);
    732 
    733 	/*
    734 	 * Read the Ethernet address from the EEPROM.
    735 	 */
    736 	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
    737 	    sizeof(myea) / sizeof(myea[0]), myea);
    738 	enaddr[0] = myea[0] & 0xff;
    739 	enaddr[1] = myea[0] >> 8;
    740 	enaddr[2] = myea[1] & 0xff;
    741 	enaddr[3] = myea[1] >> 8;
    742 	enaddr[4] = myea[2] & 0xff;
    743 	enaddr[5] = myea[2] >> 8;
    744 
    745 	/*
    746 	 * Toggle the LSB of the MAC address on the second port
    747 	 * of the i82546.
    748 	 */
    749 	if (sc->sc_type == WM_T_82546) {
    750 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
    751 			enaddr[5] ^= 1;
    752 	}
    753 
    754 	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
    755 	    ether_sprintf(enaddr));
    756 
    757 	/*
    758 	 * Read the config info from the EEPROM, and set up various
    759 	 * bits in the control registers based on their contents.
    760 	 */
    761 	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
    762 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
    763 	if (sc->sc_type >= WM_T_82544)
    764 		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
    765 
    766 	if (cfg1 & EEPROM_CFG1_ILOS)
    767 		sc->sc_ctrl |= CTRL_ILOS;
    768 	if (sc->sc_type >= WM_T_82544) {
    769 		sc->sc_ctrl |=
    770 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
    771 		    CTRL_SWDPIO_SHIFT;
    772 		sc->sc_ctrl |=
    773 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
    774 		    CTRL_SWDPINS_SHIFT;
    775 	} else {
    776 		sc->sc_ctrl |=
    777 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
    778 		    CTRL_SWDPIO_SHIFT;
    779 	}
    780 
    781 #if 0
    782 	if (sc->sc_type >= WM_T_82544) {
    783 		if (cfg1 & EEPROM_CFG1_IPS0)
    784 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
    785 		if (cfg1 & EEPROM_CFG1_IPS1)
    786 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
    787 		sc->sc_ctrl_ext |=
    788 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
    789 		    CTRL_EXT_SWDPIO_SHIFT;
    790 		sc->sc_ctrl_ext |=
    791 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
    792 		    CTRL_EXT_SWDPINS_SHIFT;
    793 	} else {
    794 		sc->sc_ctrl_ext |=
    795 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
    796 		    CTRL_EXT_SWDPIO_SHIFT;
    797 	}
    798 #endif
    799 
    800 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
    801 #if 0
    802 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
    803 #endif
    804 
    805 	/*
    806 	 * Set up some register offsets that are different between
    807 	 * the i82542 and the i82543 and later chips.
    808 	 */
    809 	if (sc->sc_type < WM_T_82543) {
    810 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
    811 		sc->sc_tdt_reg = WMREG_OLD_TDT;
    812 	} else {
    813 		sc->sc_rdt_reg = WMREG_RDT;
    814 		sc->sc_tdt_reg = WMREG_TDT;
    815 	}
    816 
    817 	/*
    818 	 * Determine if we should use flow control.  We should
    819 	 * always use it, unless we're on a i82542 < 2.1.
    820 	 */
    821 	if (sc->sc_type >= WM_T_82542_2_1)
    822 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
    823 
    824 	/*
    825 	 * Determine if we're TBI or GMII mode, and initialize the
    826 	 * media structures accordingly.
    827 	 */
    828 	if (sc->sc_type < WM_T_82543 ||
    829 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
    830 		if (wmp->wmp_flags & WMP_F_1000T)
    831 			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
    832 			    "product!\n", sc->sc_dev.dv_xname);
    833 		wm_tbi_mediainit(sc);
    834 	} else {
    835 		if (wmp->wmp_flags & WMP_F_1000X)
    836 			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
    837 			    "product!\n", sc->sc_dev.dv_xname);
    838 		wm_gmii_mediainit(sc);
    839 	}
    840 
    841 	ifp = &sc->sc_ethercom.ec_if;
    842 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
    843 	ifp->if_softc = sc;
    844 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    845 	ifp->if_ioctl = wm_ioctl;
    846 	ifp->if_start = wm_start;
    847 	ifp->if_watchdog = wm_watchdog;
    848 	ifp->if_init = wm_init;
    849 	ifp->if_stop = wm_stop;
    850 	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
    851 	IFQ_SET_READY(&ifp->if_snd);
    852 
    853 	/*
    854 	 * If we're a i82543 or greater, we can support VLANs.
    855 	 */
    856 	if (sc->sc_type >= WM_T_82543)
    857 		sc->sc_ethercom.ec_capabilities |=
    858 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
    859 
    860 	/*
    861 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
    862 	 * on i82543 and later.
    863 	 */
    864 	if (sc->sc_type >= WM_T_82543)
    865 		ifp->if_capabilities |=
    866 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
    867 
    868 	/*
    869 	 * Attach the interface.
    870 	 */
    871 	if_attach(ifp);
    872 	ether_ifattach(ifp, enaddr);
    873 
    874 #ifdef WM_EVENT_COUNTERS
    875 	/* Attach event counters. */
    876 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
    877 	    NULL, sc->sc_dev.dv_xname, "txsstall");
    878 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
    879 	    NULL, sc->sc_dev.dv_xname, "txdstall");
    880 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
    881 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
    882 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
    883 	    NULL, sc->sc_dev.dv_xname, "txdw");
    884 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
    885 	    NULL, sc->sc_dev.dv_xname, "txqe");
    886 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
    887 	    NULL, sc->sc_dev.dv_xname, "rxintr");
    888 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
    889 	    NULL, sc->sc_dev.dv_xname, "linkintr");
    890 
    891 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
    892 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
    893 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
    894 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
    895 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
    896 	    NULL, sc->sc_dev.dv_xname, "txipsum");
    897 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
    898 	    NULL, sc->sc_dev.dv_xname, "txtusum");
    899 
    900 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
    901 	    NULL, sc->sc_dev.dv_xname, "txctx init");
    902 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
    903 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
    904 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
    905 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
    906 
    907 	for (i = 0; i < WM_NTXSEGS; i++)
    908 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
    909 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
    910 
    911 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
    912 	    NULL, sc->sc_dev.dv_xname, "txdrop");
    913 
    914 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
    915 	    NULL, sc->sc_dev.dv_xname, "tu");
    916 #endif /* WM_EVENT_COUNTERS */
    917 
    918 	/*
    919 	 * Make sure the interface is shutdown during reboot.
    920 	 */
    921 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
    922 	if (sc->sc_sdhook == NULL)
    923 		printf("%s: WARNING: unable to establish shutdown hook\n",
    924 		    sc->sc_dev.dv_xname);
    925 	return;
    926 
    927 	/*
    928 	 * Free any resources we've allocated during the failed attach
    929 	 * attempt.  Do this in reverse order and fall through.
    930 	 */
    931  fail_5:
    932 	for (i = 0; i < WM_NRXDESC; i++) {
    933 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
    934 			bus_dmamap_destroy(sc->sc_dmat,
    935 			    sc->sc_rxsoft[i].rxs_dmamap);
    936 	}
    937  fail_4:
    938 	for (i = 0; i < WM_TXQUEUELEN; i++) {
    939 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
    940 			bus_dmamap_destroy(sc->sc_dmat,
    941 			    sc->sc_txsoft[i].txs_dmamap);
    942 	}
    943 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    944  fail_3:
    945 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    946  fail_2:
    947 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
    948 	    sizeof(struct wm_control_data));
    949  fail_1:
    950 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
    951  fail_0:
    952 	return;
    953 }
    954 
    955 /*
    956  * wm_shutdown:
    957  *
    958  *	Make sure the interface is stopped at reboot time.
    959  */
    960 void
    961 wm_shutdown(void *arg)
    962 {
    963 	struct wm_softc *sc = arg;
    964 
    965 	wm_stop(&sc->sc_ethercom.ec_if, 1);
    966 }
    967 
    968 /*
    969  * wm_tx_cksum:
    970  *
    971  *	Set up TCP/IP checksumming parameters for the
    972  *	specified packet.
    973  */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/*
	 * Figure out where the IP header starts.  Note: htons() on a
	 * 16-bit value is the same byte-swap as ntohs(), so this
	 * converts the network-order type for comparison against the
	 * host-order ETHERTYPE_* constants.
	 */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/*
	 * XXX Require the Ethernet + minimal IP header to be contiguous
	 * in the first mbuf; we drop the packet rather than m_pullup().
	 */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	/* Re-read the real IP header length (options may be present). */
	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	/*
	 * Build the IP checksum context (start/offset/end of the IP
	 * header).  0xffffffff in sc_txctx_ipcs/sc_txctx_tucs is the
	 * "no context loaded yet" sentinel (see the init/miss event
	 * accounting below).
	 */
	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	/* Advance past the IP header to the TCP/UDP header. */
	offset += iphl;

	/*
	 * Build the TCP/UDP checksum context.  csum_data carries the
	 * offset of the checksum field within the transport header.
	 */
	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		/*
		 * The context descriptor overlays a regular Tx
		 * descriptor slot; it consumes one ring entry, so
		 * advance sc_txnext and charge it to this job.
		 */
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		/* Remember the context we just loaded for reuse. */
		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	/* Return the cmd/fields bits the caller must put in each
	 * data descriptor of this packet. */
	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
   1090 
   1091 /*
   1092  * wm_start:		[ifnet interface function]
   1093  *
   1094  *	Start packet transmission on the interface.
   1095  */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so
		 * the packet stays queued until we know we can commit
		 * to transmitting it.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/*
		 * Get a work queue entry.  If we're low, reclaim
		 * completed jobs first before giving up.
		 */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 *
		 * Note: txs_firstdesc is recorded BEFORE wm_tx_cksum()
		 * runs, so if a context descriptor is loaded it is the
		 * first descriptor of this job.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Request delayed interrupts on this packet's descriptors. */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Give the packet to the chip.  nexttx is now one
		 * past lasttx, which is what the TDT register wants.
		 */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
   1342 
   1343 /*
   1344  * wm_watchdog:		[ifnet interface function]
   1345  *
   1346  *	Watchdog timer handler.
   1347  */
   1348 void
   1349 wm_watchdog(struct ifnet *ifp)
   1350 {
   1351 	struct wm_softc *sc = ifp->if_softc;
   1352 
   1353 	/*
   1354 	 * Since we're using delayed interrupts, sweep up
   1355 	 * before we report an error.
   1356 	 */
   1357 	wm_txintr(sc);
   1358 
   1359 	if (sc->sc_txfree != WM_NTXDESC) {
   1360 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   1361 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
   1362 		    sc->sc_txnext);
   1363 		ifp->if_oerrors++;
   1364 
   1365 		/* Reset the interface. */
   1366 		(void) wm_init(ifp);
   1367 	}
   1368 
   1369 	/* Try to get more packets going. */
   1370 	wm_start(ifp);
   1371 }
   1372 
   1373 /*
   1374  * wm_ioctl:		[ifnet interface function]
   1375  *
   1376  *	Handle control requests from the operator.
   1377  */
   1378 int
   1379 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
   1380 {
   1381 	struct wm_softc *sc = ifp->if_softc;
   1382 	struct ifreq *ifr = (struct ifreq *) data;
   1383 	int s, error;
   1384 
   1385 	s = splnet();
   1386 
   1387 	switch (cmd) {
   1388 	case SIOCSIFMEDIA:
   1389 	case SIOCGIFMEDIA:
   1390 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   1391 		break;
   1392 
   1393 	default:
   1394 		error = ether_ioctl(ifp, cmd, data);
   1395 		if (error == ENETRESET) {
   1396 			/*
   1397 			 * Multicast list has changed; set the hardware filter
   1398 			 * accordingly.
   1399 			 */
   1400 			wm_set_filter(sc);
   1401 			error = 0;
   1402 		}
   1403 		break;
   1404 	}
   1405 
   1406 	/* Try to get more packets going. */
   1407 	wm_start(ifp);
   1408 
   1409 	splx(s);
   1410 	return (error);
   1411 }
   1412 
   1413 /*
   1414  * wm_intr:
   1415  *
   1416  *	Interrupt service routine.
   1417  */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	/*
	 * Service interrupt causes until none of the causes we care
	 * about remain pending, or until a receive overrun forces a
	 * full reinitialization of the chip.
	 */
	for (wantinit = 0; wantinit == 0;) {
		/*
		 * NOTE(review): on this chip family reading ICR is
		 * presumed to acknowledge the pending causes -- confirm
		 * against the i8254x manual.
		 */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/*
		 * Rx and Tx are swept unconditionally (not gated on
		 * their ICR bits); with delayed interrupts there may
		 * be completed work even without a cause bit set.
		 */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link status / sequence-error / config causes. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overran its ring; schedule a reinit. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	/* Non-zero tells the interrupt dispatcher we claimed this one. */
	return (handled);
}
   1475 
   1476 /*
   1477  * wm_txintr:
   1478  *
   1479  *	Helper; handle transmit interrupts.
   1480  */
   1481 void
   1482 wm_txintr(struct wm_softc *sc)
   1483 {
   1484 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1485 	struct wm_txsoft *txs;
   1486 	uint8_t status;
   1487 	int i;
   1488 
   1489 	ifp->if_flags &= ~IFF_OACTIVE;
   1490 
   1491 	/*
   1492 	 * Go through the Tx list and free mbufs for those
   1493 	 * frames which have been transmitted.
   1494 	 */
   1495 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
   1496 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
   1497 		txs = &sc->sc_txsoft[i];
   1498 
   1499 		DPRINTF(WM_DEBUG_TX,
   1500 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
   1501 
   1502 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
   1503 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   1504 
   1505 		status = le32toh(sc->sc_txdescs[
   1506 		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
   1507 		if ((status & WTX_ST_DD) == 0) {
   1508 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
   1509 			    BUS_DMASYNC_PREREAD);
   1510 			break;
   1511 		}
   1512 
   1513 		DPRINTF(WM_DEBUG_TX,
   1514 		    ("%s: TX: job %d done: descs %d..%d\n",
   1515 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
   1516 		    txs->txs_lastdesc));
   1517 
   1518 		/*
   1519 		 * XXX We should probably be using the statistics
   1520 		 * XXX registers, but I don't know if they exist
   1521 		 * XXX on chips before the i82544.
   1522 		 */
   1523 
   1524 #ifdef WM_EVENT_COUNTERS
   1525 		if (status & WTX_ST_TU)
   1526 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   1527 #endif /* WM_EVENT_COUNTERS */
   1528 
   1529 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   1530 			ifp->if_oerrors++;
   1531 			if (status & WTX_ST_LC)
   1532 				printf("%s: late collision\n",
   1533 				    sc->sc_dev.dv_xname);
   1534 			else if (status & WTX_ST_EC) {
   1535 				ifp->if_collisions += 16;
   1536 				printf("%s: excessive collisions\n",
   1537 				    sc->sc_dev.dv_xname);
   1538 			}
   1539 		} else
   1540 			ifp->if_opackets++;
   1541 
   1542 		sc->sc_txfree += txs->txs_ndesc;
   1543 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1544 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1545 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1546 		m_freem(txs->txs_mbuf);
   1547 		txs->txs_mbuf = NULL;
   1548 	}
   1549 
   1550 	/* Update the dirty transmit buffer pointer. */
   1551 	sc->sc_txsdirty = i;
   1552 	DPRINTF(WM_DEBUG_TX,
   1553 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
   1554 
   1555 	/*
   1556 	 * If there are no more pending transmissions, cancel the watchdog
   1557 	 * timer.
   1558 	 */
   1559 	if (sc->sc_txsfree == WM_TXQUEUELEN)
   1560 		ifp->if_timer = 0;
   1561 }
   1562 
   1563 /*
   1564  * wm_rxintr:
   1565  *
   1566  *	Helper; handle receive interrupts.
   1567  */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the receive ring from sc_rxptr, assembling possibly
	 * multi-descriptor packets via the sc_rxhead/sc_rxtailp chain,
	 * until we find a descriptor the chip has not completed.
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		/* Pull the descriptor contents in from DMA memory. */
		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * A previous buffer-allocation failure left a partial
		 * packet in flight; throw away descriptors until EOP.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the packet under assembly. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;	/* total = earlier fragments + this one */

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
   1767 
   1768 /*
   1769  * wm_linkintr:
   1770  *
   1771  *	Helper; handle link interrupts.
   1772  */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/* From here down we are handling a TBI (fiber) interface. */

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		/* presumably "2" == recheck after two tick periods -- verify */
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		/* Read the link-up and duplex bits from the STATUS reg. */
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * Reprogram the transmit collision distance to
			 * match the negotiated duplex.
			 */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		/* Reflect the new link state on the link LED. */
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
   1834 
   1835 /*
   1836  * wm_tick:
   1837  *
   1838  *	One second timer, used to check link status, sweep up
   1839  *	completed transmit jobs, etc.
   1840  */
   1841 void
   1842 wm_tick(void *arg)
   1843 {
   1844 	struct wm_softc *sc = arg;
   1845 	int s;
   1846 
   1847 	s = splnet();
   1848 
   1849 	if (sc->sc_flags & WM_F_HAS_MII)
   1850 		mii_tick(&sc->sc_mii);
   1851 	else
   1852 		wm_tbi_check_link(sc);
   1853 
   1854 	splx(s);
   1855 
   1856 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   1857 }
   1858 
   1859 /*
   1860  * wm_reset:
   1861  *
   1862  *	Reset the i82542 chip.
   1863  */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	/* Assert device reset and give the chip time to settle. */
	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	/* Poll until the chip clears the reset bit (up to ~20ms). */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	/* One final read before declaring the reset a failure. */
	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
   1882 
   1883 /*
   1884  * wm_init:		[ifnet interface function]
   1885  *
   1886  *	Initialize the interface.  Must be called at splnet().
   1887  */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/* Invalidate the cached checksum-offload context state. */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the transmit ring base/length and head/tail
	 * pointers; pre-82543 chips use the old register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second (old-layout) receive ring is unused; clear it. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	/* Make sure every receive slot has a buffer attached. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	/* TCP/UDP offload also requires the IP offload bit. */
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* Fiber (TBI) parts also want the /C/ reception interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
   2104 
   2105 /*
   2106  * wm_rxdrain:
   2107  *
   2108  *	Drain the receive queue.
   2109  */
   2110 void
   2111 wm_rxdrain(struct wm_softc *sc)
   2112 {
   2113 	struct wm_rxsoft *rxs;
   2114 	int i;
   2115 
   2116 	for (i = 0; i < WM_NRXDESC; i++) {
   2117 		rxs = &sc->sc_rxsoft[i];
   2118 		if (rxs->rxs_mbuf != NULL) {
   2119 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   2120 			m_freem(rxs->rxs_mbuf);
   2121 			rxs->rxs_mbuf = NULL;
   2122 		}
   2123 	}
   2124 }
   2125 
   2126 /*
   2127  * wm_stop:		[ifnet interface function]
   2128  *
   2129  *	Stop transmission on the interface.
   2130  */
   2131 void
   2132 wm_stop(struct ifnet *ifp, int disable)
   2133 {
   2134 	struct wm_softc *sc = ifp->if_softc;
   2135 	struct wm_txsoft *txs;
   2136 	int i;
   2137 
   2138 	/* Stop the one second clock. */
   2139 	callout_stop(&sc->sc_tick_ch);
   2140 
   2141 	if (sc->sc_flags & WM_F_HAS_MII) {
   2142 		/* Down the MII. */
   2143 		mii_down(&sc->sc_mii);
   2144 	}
   2145 
   2146 	/* Stop the transmit and receive processes. */
   2147 	CSR_WRITE(sc, WMREG_TCTL, 0);
   2148 	CSR_WRITE(sc, WMREG_RCTL, 0);
   2149 
   2150 	/* Release any queued transmit buffers. */
   2151 	for (i = 0; i < WM_TXQUEUELEN; i++) {
   2152 		txs = &sc->sc_txsoft[i];
   2153 		if (txs->txs_mbuf != NULL) {
   2154 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   2155 			m_freem(txs->txs_mbuf);
   2156 			txs->txs_mbuf = NULL;
   2157 		}
   2158 	}
   2159 
   2160 	if (disable)
   2161 		wm_rxdrain(sc);
   2162 
   2163 	/* Mark the interface as down and cancel the watchdog timer. */
   2164 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   2165 	ifp->if_timer = 0;
   2166 }
   2167 
   2168 /*
   2169  * wm_read_eeprom:
   2170  *
   2171  *	Read data from the serial EEPROM.
   2172  */
   2173 void
   2174 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   2175 {
   2176 	uint32_t reg;
   2177 	int i, x, addrbits = 6;
   2178 
   2179 	for (i = 0; i < wordcnt; i++) {
   2180 		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2181 			reg = CSR_READ(sc, WMREG_EECD);
   2182 
   2183 			/* Get number of address bits. */
   2184 			if (reg & EECD_EE_SIZE)
   2185 				addrbits = 8;
   2186 
   2187 			/* Request EEPROM access. */
   2188 			reg |= EECD_EE_REQ;
   2189 			CSR_WRITE(sc, WMREG_EECD, reg);
   2190 
   2191 			/* ..and wait for it to be granted. */
   2192 			for (x = 0; x < 100; x++) {
   2193 				reg = CSR_READ(sc, WMREG_EECD);
   2194 				if (reg & EECD_EE_GNT)
   2195 					break;
   2196 				delay(5);
   2197 			}
   2198 			if ((reg & EECD_EE_GNT) == 0) {
   2199 				printf("%s: could not acquire EEPROM GNT\n",
   2200 				    sc->sc_dev.dv_xname);
   2201 				*data = 0xffff;
   2202 				reg &= ~EECD_EE_REQ;
   2203 				CSR_WRITE(sc, WMREG_EECD, reg);
   2204 				continue;
   2205 			}
   2206 		} else
   2207 			reg = 0;
   2208 
   2209 		/* Clear SK and DI. */
   2210 		reg &= ~(EECD_SK | EECD_DI);
   2211 		CSR_WRITE(sc, WMREG_EECD, reg);
   2212 
   2213 		/* Set CHIP SELECT. */
   2214 		reg |= EECD_CS;
   2215 		CSR_WRITE(sc, WMREG_EECD, reg);
   2216 		delay(2);
   2217 
   2218 		/* Shift in the READ command. */
   2219 		for (x = 3; x > 0; x--) {
   2220 			if (UWIRE_OPC_READ & (1 << (x - 1)))
   2221 				reg |= EECD_DI;
   2222 			else
   2223 				reg &= ~EECD_DI;
   2224 			CSR_WRITE(sc, WMREG_EECD, reg);
   2225 			delay(2);
   2226 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2227 			delay(2);
   2228 			CSR_WRITE(sc, WMREG_EECD, reg);
   2229 			delay(2);
   2230 		}
   2231 
   2232 		/* Shift in address. */
   2233 		for (x = addrbits; x > 0; x--) {
   2234 			if ((word + i) & (1 << (x - 1)))
   2235 				reg |= EECD_DI;
   2236 			else
   2237 				reg &= ~EECD_DI;
   2238 			CSR_WRITE(sc, WMREG_EECD, reg);
   2239 			delay(2);
   2240 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2241 			delay(2);
   2242 			CSR_WRITE(sc, WMREG_EECD, reg);
   2243 			delay(2);
   2244 		}
   2245 
   2246 		/* Shift out the data. */
   2247 		reg &= ~EECD_DI;
   2248 		data[i] = 0;
   2249 		for (x = 16; x > 0; x--) {
   2250 			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   2251 			delay(2);
   2252 			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   2253 				data[i] |= (1 << (x - 1));
   2254 			CSR_WRITE(sc, WMREG_EECD, reg);
   2255 			delay(2);
   2256 		}
   2257 
   2258 		/* Clear CHIP SELECT. */
   2259 		reg &= ~EECD_CS;
   2260 		CSR_WRITE(sc, WMREG_EECD, reg);
   2261 		delay(2);
   2262 
   2263 		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
   2264 			/* Release the EEPROM. */
   2265 			reg &= ~EECD_EE_REQ;
   2266 			CSR_WRITE(sc, WMREG_EECD, reg);
   2267 		}
   2268 	}
   2269 }
   2270 
   2271 /*
   2272  * wm_add_rxbuf:
   2273  *
 *	Add a receive buffer to the indicated descriptor.
   2275  */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Get an mbuf header and attach a cluster for the packet data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* If the slot already had a buffer mapped, unmap it first. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the cluster for DMA by the receive engine. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (back) to the hardware. */
	WM_INIT_RXDESC(sc, idx);

	return (0);
}
   2314 
   2315 /*
   2316  * wm_set_ral:
   2317  *
 *	Set an entry in the receive address list.
   2319  */
   2320 static void
   2321 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2322 {
   2323 	uint32_t ral_lo, ral_hi;
   2324 
   2325 	if (enaddr != NULL) {
   2326 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2327 		    (enaddr[3] << 24);
   2328 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2329 		ral_hi |= RAL_AV;
   2330 	} else {
   2331 		ral_lo = 0;
   2332 		ral_hi = 0;
   2333 	}
   2334 
   2335 	if (sc->sc_type >= WM_T_82544) {
   2336 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2337 		    ral_lo);
   2338 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2339 		    ral_hi);
   2340 	} else {
   2341 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2342 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2343 	}
   2344 }
   2345 
   2346 /*
   2347  * wm_mchash:
   2348  *
   2349  *	Compute the hash of the multicast address for the 4096-bit
   2350  *	multicast filter.
   2351  */
   2352 static uint32_t
   2353 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2354 {
   2355 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2356 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2357 	uint32_t hash;
   2358 
   2359 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   2360 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   2361 
   2362 	return (hash & 0xfff);
   2363 }
   2364 
   2365 /*
   2366  * wm_set_filter:
   2367  *
   2368  *	Set up the receive filter.
   2369  */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	/* The multicast table address moved on the 82544 and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	/* Set a filter bit for each multicast address we listen to. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into a 32-bit word index and a bit index. */
		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			/* Rewrite the preceding word after the update. */
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
   2450 
   2451 /*
   2452  * wm_tbi_mediainit:
   2453  *
   2454  *	Initialize media for use on 1000BASE-X devices.
   2455  */
   2456 void
   2457 wm_tbi_mediainit(struct wm_softc *sc)
   2458 {
   2459 	const char *sep = "";
   2460 
   2461 	if (sc->sc_type < WM_T_82543)
   2462 		sc->sc_tipg = TIPG_WM_DFLT;
   2463 	else
   2464 		sc->sc_tipg = TIPG_LG_DFLT;
   2465 
   2466 	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
   2467 	    wm_tbi_mediastatus);
   2468 
   2469 	/*
   2470 	 * SWD Pins:
   2471 	 *
   2472 	 *	0 = Link LED (output)
   2473 	 *	1 = Loss Of Signal (input)
   2474 	 */
   2475 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   2476 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   2477 
   2478 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2479 
   2480 #define	ADD(s, m, d)							\
   2481 do {									\
   2482 	printf("%s%s", sep, s);						\
   2483 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
   2484 	sep = ", ";							\
   2485 } while (/*CONSTCOND*/0)
   2486 
   2487 	printf("%s: ", sc->sc_dev.dv_xname);
   2488 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   2489 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   2490 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   2491 	printf("\n");
   2492 
   2493 #undef ADD
   2494 
   2495 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
   2496 }
   2497 
   2498 /*
   2499  * wm_tbi_mediastatus:	[ifmedia interface function]
   2500  *
   2501  *	Get the current interface media status on a 1000BASE-X device.
   2502  */
   2503 void
   2504 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   2505 {
   2506 	struct wm_softc *sc = ifp->if_softc;
   2507 
   2508 	ifmr->ifm_status = IFM_AVALID;
   2509 	ifmr->ifm_active = IFM_ETHER;
   2510 
   2511 	if (sc->sc_tbi_linkup == 0) {
   2512 		ifmr->ifm_active |= IFM_NONE;
   2513 		return;
   2514 	}
   2515 
   2516 	ifmr->ifm_status |= IFM_ACTIVE;
   2517 	ifmr->ifm_active |= IFM_1000_SX;
   2518 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   2519 		ifmr->ifm_active |= IFM_FDX;
   2520 }
   2521 
   2522 /*
   2523  * wm_tbi_mediachange:	[ifmedia interface function]
   2524  *
   2525  *	Set hardware to newly-selected media on a 1000BASE-X device.
   2526  */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Build the transmit config word from the selected media. */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal (active high; see wm_tbi_mediainit). */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; set collision distance per duplex. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
   2588 
   2589 /*
   2590  * wm_tbi_set_linkled:
   2591  *
   2592  *	Update the link LED on 1000BASE-X devices.
   2593  */
   2594 void
   2595 wm_tbi_set_linkled(struct wm_softc *sc)
   2596 {
   2597 
   2598 	if (sc->sc_tbi_linkup)
   2599 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   2600 	else
   2601 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   2602 
   2603 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2604 }
   2605 
   2606 /*
   2607  * wm_tbi_check_link:
   2608  *
   2609  *	Check the link on 1000BASE-X devices.
   2610  */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * anstate == 0 means nothing to do; anstate > 1 is a countdown
	 * (set elsewhere after link events) deferring the check for a
	 * few ticks.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Link is up; set the collision distance per the duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
   2653 
   2654 /*
   2655  * wm_gmii_reset:
   2656  *
   2657  *	Reset the PHY.
   2658  */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544+: pulse the PHY reset bit in the control register. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		/* Drive SWD pin 4 as an output. */
		reg |= CTRL_EXT_SWDPIO(4);

		/* High -> low -> high to pulse the reset pin. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
   2690 
   2691 /*
   2692  * wm_gmii_mediainit:
   2693  *
   2694  *	Initialize media for use on 1000BASE-T devices.
   2695  */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* MDIO register access differs pre- and post-82544. */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	/* Probe for PHYs; fall back to IFM_NONE if none were found. */
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
   2738 
   2739 /*
   2740  * wm_gmii_mediastatus:	[ifmedia interface function]
   2741  *
   2742  *	Get the current interface media status on a 1000BASE-T device.
   2743  */
   2744 void
   2745 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   2746 {
   2747 	struct wm_softc *sc = ifp->if_softc;
   2748 
   2749 	mii_pollstat(&sc->sc_mii);
   2750 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   2751 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   2752 }
   2753 
   2754 /*
   2755  * wm_gmii_mediachange:	[ifmedia interface function]
   2756  *
   2757  *	Set hardware to newly-selected media on a 1000BASE-T device.
   2758  */
   2759 int
   2760 wm_gmii_mediachange(struct ifnet *ifp)
   2761 {
   2762 	struct wm_softc *sc = ifp->if_softc;
   2763 
   2764 	if (ifp->if_flags & IFF_UP)
   2765 		mii_mediachg(&sc->sc_mii);
   2766 	return (0);
   2767 }
   2768 
   2769 #define	MDI_IO		CTRL_SWDPIN(2)
   2770 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   2771 #define	MDI_CLK		CTRL_SWDPIN(3)
   2772 
/*
 * Clock out the low `nbits' bits of `data', MSB first, over the
 * bit-banged MDIO interface (SWD pins 2 = data, 3 = clock).
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive both the data and clock pins as outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then pulse the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
   2795 
/*
 * Clock in 16 bits, MSB first, from the bit-banged MDIO interface
 * (SWD pin 2 = data as input, 3 = clock).
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Clock is an output; the data pin is left as an input. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One clock pulse before the data bits (turnaround, presumably). */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Raise the clock, then sample the data pin. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock pulse to finish the cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
   2829 
   2830 #undef MDI_IO
   2831 #undef MDI_DIR
   2832 #undef MDI_CLK
   2833 
   2834 /*
   2835  * wm_gmii_i82543_readreg:	[mii interface function]
   2836  *
   2837  *	Read a PHY register on the GMII (i82543 version).
   2838  */
   2839 int
   2840 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
   2841 {
   2842 	struct wm_softc *sc = (void *) self;
   2843 	int rv;
   2844 
   2845 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   2846 	i82543_mii_sendbits(sc, reg | (phy << 5) |
   2847 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   2848 	rv = i82543_mii_recvbits(sc) & 0xffff;
   2849 
   2850 	DPRINTF(WM_DEBUG_GMII,
   2851 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   2852 	    sc->sc_dev.dv_xname, phy, reg, rv));
   2853 
   2854 	return (rv);
   2855 }
   2856 
   2857 /*
   2858  * wm_gmii_i82543_writereg:	[mii interface function]
   2859  *
   2860  *	Write a PHY register on the GMII (i82543 version).
   2861  */
   2862 void
   2863 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
   2864 {
   2865 	struct wm_softc *sc = (void *) self;
   2866 
   2867 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
   2868 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   2869 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   2870 	    (MII_COMMAND_START << 30), 32);
   2871 }
   2872 
   2873 /*
   2874  * wm_gmii_i82544_readreg:	[mii interface function]
   2875  *
   2876  *	Read a PHY register on the GMII.
   2877  */
   2878 int
   2879 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
   2880 {
   2881 	struct wm_softc *sc = (void *) self;
   2882 	uint32_t mdic;
   2883 	int i, rv;
   2884 
   2885 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   2886 	    MDIC_REGADD(reg));
   2887 
   2888 	for (i = 0; i < 100; i++) {
   2889 		mdic = CSR_READ(sc, WMREG_MDIC);
   2890 		if (mdic & MDIC_READY)
   2891 			break;
   2892 		delay(10);
   2893 	}
   2894 
   2895 	if ((mdic & MDIC_READY) == 0) {
   2896 		printf("%s: MDIC read timed out: phy %d reg %d\n",
   2897 		    sc->sc_dev.dv_xname, phy, reg);
   2898 		rv = 0;
   2899 	} else if (mdic & MDIC_E) {
   2900 #if 0 /* This is normal if no PHY is present. */
   2901 		printf("%s: MDIC read error: phy %d reg %d\n",
   2902 		    sc->sc_dev.dv_xname, phy, reg);
   2903 #endif
   2904 		rv = 0;
   2905 	} else {
   2906 		rv = MDIC_DATA(mdic);
   2907 		if (rv == 0xffff)
   2908 			rv = 0;
   2909 	}
   2910 
   2911 	return (rv);
   2912 }
   2913 
   2914 /*
   2915  * wm_gmii_i82544_writereg:	[mii interface function]
   2916  *
   2917  *	Write a PHY register on the GMII.
   2918  */
   2919 void
   2920 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
   2921 {
   2922 	struct wm_softc *sc = (void *) self;
   2923 	uint32_t mdic;
   2924 	int i;
   2925 
   2926 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   2927 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   2928 
   2929 	for (i = 0; i < 100; i++) {
   2930 		mdic = CSR_READ(sc, WMREG_MDIC);
   2931 		if (mdic & MDIC_READY)
   2932 			break;
   2933 		delay(10);
   2934 	}
   2935 
   2936 	if ((mdic & MDIC_READY) == 0)
   2937 		printf("%s: MDIC write timed out: phy %d reg %d\n",
   2938 		    sc->sc_dev.dv_xname, phy, reg);
   2939 	else if (mdic & MDIC_E)
   2940 		printf("%s: MDIC write error: phy %d reg %d\n",
   2941 		    sc->sc_dev.dv_xname, phy, reg);
   2942 }
   2943 
   2944 /*
   2945  * wm_gmii_statchg:	[mii interface function]
   2946  *
   2947  *	Callback from MII layer when media changes.
   2948  */
   2949 void
   2950 wm_gmii_statchg(struct device *self)
   2951 {
   2952 	struct wm_softc *sc = (void *) self;
   2953 
   2954 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   2955 
   2956 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   2957 		DPRINTF(WM_DEBUG_LINK,
   2958 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
   2959 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   2960 	} else  {
   2961 		DPRINTF(WM_DEBUG_LINK,
   2962 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
   2963 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   2964 	}
   2965 
   2966 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   2967 }
   2968