Home | History | Annotate | Line # | Download | only in ic
mtd803.c revision 1.19
      1 /* $NetBSD: mtd803.c,v 1.19 2008/04/08 12:07:26 cegger Exp $ */
      2 
      3 /*-
      4  *
      5  * Copyright (c) 2002 The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software contributed to The NetBSD Foundation
      9  * by Peter Bex <Peter.Bex (at) student.kun.nl>.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *      This product includes software developed by the NetBSD
     22  *      Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * TODO:
     42  * - Most importantly, get some bus_dmamap_syncs in the correct places.
     43  *    I don't have access to a computer with PCI other than i386, and i386
     44  *    is just such a machine where dmamap_syncs don't do anything.
     45  * - Powerhook for when resuming after standby.
     46  * - Watchdog stuff doesn't work yet, the system crashes.
     47  * - There seems to be a CardBus version of the card. (see datasheet)
     48  *    Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
     49  * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
     50  *    raised every time a packet is sent. Strange, since everything works anyway
     51  */
     52 
     53 #include <sys/cdefs.h>
     54 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.19 2008/04/08 12:07:26 cegger Exp $");
     55 
     56 #include "bpfilter.h"
     57 
     58 #include <sys/param.h>
     59 #include <sys/mbuf.h>
     60 #include <sys/systm.h>
     61 #include <sys/device.h>
     62 #include <sys/socket.h>
     63 #include <sys/ioctl.h>
     64 #include <sys/syslog.h>
     65 
     66 #include <net/if.h>
     67 #include <net/if_ether.h>
     68 #include <net/if_media.h>
     69 
     70 #ifdef INET
     71 #include <netinet/in.h>
     72 #include <netinet/if_inarp.h>
     73 #include <netinet/in_systm.h>
     74 #include <netinet/in_var.h>
     75 #include <netinet/ip.h>
     76 #endif
     77 
     78 #if NBPFILTER > 0
     79 #include <net/bpf.h>
     80 #include <net/bpfdesc.h>
     81 #endif
     82 
     83 #include <sys/bus.h>
     84 
     85 #include <dev/ic/mtd803reg.h>
     86 #include <dev/ic/mtd803var.h>
     87 #include <dev/mii/mii.h>
     88 #include <dev/mii/miivar.h>
     89 
     90 /*
     91  * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
     92  * Written by Peter Bex (peter.bex (at) student.kun.nl)
     93  *
     94  * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
     95  */
     96 
     97 #define MTD_READ_1(sc, reg) \
     98 	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
     99 #define MTD_WRITE_1(sc, reg, data) \
    100 	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
    101 
    102 #define MTD_READ_2(sc, reg) \
    103 	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
    104 #define MTD_WRITE_2(sc, reg, data) \
    105 	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
    106 
    107 #define MTD_READ_4(sc, reg) \
    108 	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
    109 #define MTD_WRITE_4(sc, reg, data) \
    110 	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
    111 
    112 #define MTD_SETBIT(sc, reg, x) \
    113 	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
    114 #define MTD_CLRBIT(sc, reg, x) \
    115 	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))
    116 
    117 #define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))
    118 
    119 int mtd_mii_readreg(struct device *, int, int);
    120 void mtd_mii_writereg(struct device *, int, int, int);
    121 void mtd_mii_statchg(struct device *);
    122 
    123 void mtd_start(struct ifnet *);
    124 void mtd_stop(struct ifnet *, int);
    125 int mtd_ioctl(struct ifnet *, u_long, void *);
    126 void mtd_setmulti(struct mtd_softc *);
    127 void mtd_watchdog(struct ifnet *);
    128 
    129 int mtd_init(struct ifnet *);
    130 void mtd_reset(struct mtd_softc *);
    131 void mtd_shutdown(void *);
    132 int mtd_init_desc(struct mtd_softc *);
    133 int mtd_put(struct mtd_softc *, int, struct mbuf *);
    134 struct mbuf *mtd_get(struct mtd_softc *, int, int);
    135 
    136 int mtd_rxirq(struct mtd_softc *);
    137 int mtd_txirq(struct mtd_softc *);
    138 int mtd_bufirq(struct mtd_softc *);
    139 
    140 
/*
 * mtd_config:
 *	Common attach code: read the station address from the chip,
 *	hook up the ifnet and MII structures, allocate the DMA
 *	descriptor/buffer memory and attach the ethernet interface.
 *	Returns 0 on success, 1 on failure.
 */
int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address out of the PAR0..PAR5 registers */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(&sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface callbacks */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* Probe for an attached PHY */
	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		/* No PHY found: the interface cannot work */
		aprint_error_dev(&sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		/* Default to autonegotiated media */
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	/* Allocate DMA descriptors and packet buffers */
	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(&sc->dev), RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
    198 
    199 
/*
 * mtd_init:
 *	Reset and (re)program the chip, set the receive filters, enable
 *	interrupts, point the chip at the descriptor rings and start
 *	the receiver and transmitter.  Always returns 0.
 *
 *	Must be called at splnet().
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	/* Store-and-forward transmit, full duplex */
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	/* Program the multicast filter */
	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/*
	 * Set descriptor base addresses.  The tx ring begins right after
	 * the MTD_NUM_RXD rx descriptors in the same DMA area.
	 */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
    253 
    254 
    255 int
    256 mtd_init_desc(struct mtd_softc *sc)
    257 {
    258 	int rseg, err, i;
    259 	bus_dma_segment_t seg;
    260 	bus_size_t size;
    261 
    262 	/* Allocate memory for descriptors */
    263 	size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);
    264 
    265 	/* Allocate DMA-safe memory */
    266 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
    267 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    268 		aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n", err);
    269 		return 1;
    270 	}
    271 
    272 	/* Map memory to kernel addressable space */
    273 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
    274 		(void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    275 		aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n", err);
    276 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    277 		return 1;
    278 	}
    279 
    280 	/* Create a DMA map */
    281 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
    282 		size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
    283 		aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n", err);
    284 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    285 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    286 		return 1;
    287 	}
    288 
    289 	/* Load the DMA map */
    290 	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
    291 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
    292 		aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n",
    293 			err);
    294 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
    295 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    296 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    297 		return 1;
    298 	}
    299 
    300 	/* Allocate memory for the buffers */
    301 	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;
    302 
    303 	/* Allocate DMA-safe memory */
    304 	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
    305 			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    306 		aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n",
    307 			err);
    308 
    309 		/* Undo DMA map for descriptors */
    310 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
    311 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
    312 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    313 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    314 		return 1;
    315 	}
    316 
    317 	/* Map memory to kernel addressable space */
    318 	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
    319 		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    320 		aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n",
    321 			err);
    322 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    323 
    324 		/* Undo DMA map for descriptors */
    325 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
    326 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
    327 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    328 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    329 		return 1;
    330 	}
    331 
    332 	/* Create a DMA map */
    333 	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
    334 		size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
    335 		aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n",
    336 			err);
    337 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
    338 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    339 
    340 		/* Undo DMA map for descriptors */
    341 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
    342 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
    343 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    344 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    345 		return 1;
    346 	}
    347 
    348 	/* Load the DMA map */
    349 	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
    350 		size, NULL, BUS_DMA_NOWAIT)) != 0) {
    351 		aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n",
    352 			err);
    353 		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
    354 		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
    355 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    356 
    357 		/* Undo DMA map for descriptors */
    358 		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
    359 		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
    360 		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
    361 		bus_dmamem_free(sc->dma_tag, &seg, rseg);
    362 		return 1;
    363 	}
    364 
    365 	/* Descriptors are stored as a circular linked list */
    366 	/* Fill in rx descriptors */
    367 	for (i = 0; i < MTD_NUM_RXD; ++i) {
    368 		sc->desc[i].stat = MTD_RXD_OWNER;
    369 		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
    370 			/* Link back to first rx descriptor */
    371 			sc->desc[i].next =
    372 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
    373 		} else {
    374 			/* Link forward to next rx descriptor */
    375 			sc->desc[i].next =
    376 			htole32(sc->desc_dma_map->dm_segs[0].ds_addr
    377 					+ (i + 1) * sizeof(struct mtd_desc));
    378 		}
    379 		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
    380 		/* Set buffer's address */
    381 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
    382 					+ i * MTD_RXBUF_SIZE);
    383 	}
    384 
    385 	/* Fill in tx descriptors */
    386 	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
    387 		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
    388 		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
    389 			/* Link back to first tx descriptor */
    390 			sc->desc[i].next =
    391 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
    392 					+MTD_NUM_RXD * sizeof(struct mtd_desc));
    393 		} else {
    394 			/* Link forward to next tx descriptor */
    395 			sc->desc[i].next =
    396 				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
    397 					+ (i + 1) * sizeof(struct mtd_desc));
    398 		}
    399 		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
    400 		/* Set buffer's address */
    401 		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
    402 					+ MTD_NUM_RXD * MTD_RXBUF_SIZE
    403 					+ (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
    404 	}
    405 
    406 	return 0;
    407 }
    408 
    409 
    410 void
    411 mtd_mii_statchg(device_t self)
    412 {
    413 	/* Should we do something here? :) */
    414 }
    415 
    416 
    417 int
    418 mtd_mii_readreg(device_t self, int phy, int reg)
    419 {
    420 	struct mtd_softc *sc = device_private(self);
    421 
    422 	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
    423 }
    424 
    425 
    426 void
    427 mtd_mii_writereg(device_t self, int phy, int reg, int val)
    428 {
    429 	struct mtd_softc *sc = device_private(self);
    430 
    431 	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
    432 }
    433 
    434 
    435 int
    436 mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
    437 {
    438 	int len, tlen;
    439 	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
    440 			+ index * MTD_TXBUF_SIZE;
    441 	struct mbuf *n;
    442 
    443 	for (tlen = 0; m != NULL; m = n) {
    444 		len = m->m_len;
    445 		if (len == 0) {
    446 			MFREE(m, n);
    447 			continue;
    448 		} else if (tlen > MTD_TXBUF_SIZE) {
    449 			/* XXX FIXME: No idea what to do here. */
    450 			aprint_error_dev(&sc->dev, "packet too large! Size = %i\n",
    451 				tlen);
    452 			MFREE(m, n);
    453 			continue;
    454 		}
    455 		memcpy(buf, mtod(m, void *), len);
    456 		buf += len;
    457 		tlen += len;
    458 		MFREE(m, n);
    459 	}
    460 	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
    461 		| MTD_TXD_CONF_IRQC
    462 		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
    463 		| (tlen & MTD_TXD_CONF_BUFS);
    464 
    465 	return tlen;
    466 }
    467 
    468 
/*
 * mtd_start:
 *	ifnet start routine: copy every packet on the send queue into
 *	the tx ring, then issue a transmit polling demand.
 *
 *	NOTE(review): if the send queue is empty this still marks
 *	descriptor `first_tx' FSD/LSD and hands it to the chip —
 *	confirm this routine is only ever called with work queued.
 */
void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		/* Let BPF listeners see the outgoing packet */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);

		/*
		 * Hand ownership of every descriptor except the first to
		 * the chip now; the first is withheld until the whole
		 * batch is set up and released below.
		 */
		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		/* Ring wrapped: last used descriptor is the ring's last */
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}
    519 
    520 
    521 void
    522 mtd_stop(struct ifnet *ifp, int disable)
    523 {
    524 	struct mtd_softc *sc = ifp->if_softc;
    525 
    526 	/* Disable transmitter and receiver */
    527 	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
    528 	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
    529 
    530 	/* Disable interrupts */
    531 	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);
    532 
    533 	/* Must do more at disable??... */
    534 	if (disable) {
    535 		/* Delete tx and rx descriptor base addresses */
    536 		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
    537 		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
    538 	}
    539 
    540 	ifp->if_timer = 0;
    541 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    542 }
    543 
    544 
    545 void
    546 mtd_watchdog(struct ifnet *ifp)
    547 {
    548 	struct mtd_softc *sc = ifp->if_softc;
    549 	int s;
    550 
    551 	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->dev));
    552 	++sc->ethercom.ec_if.if_oerrors;
    553 
    554 	mtd_stop(ifp, 0);
    555 
    556 	s = splnet();
    557 	mtd_init(ifp);
    558 	splx(s);
    559 
    560 	return;
    561 }
    562 
    563 
    564 int
    565 mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    566 {
    567 	struct mtd_softc *sc = ifp->if_softc;
    568 	int s, error = 0;
    569 
    570 	s = splnet();
    571 
    572 	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
    573 		/*
    574 		 * Multicast list has changed; set the hardware
    575 		 * filter accordingly.
    576 		 */
    577 		 if (ifp->if_flags & IFF_RUNNING)
    578 			 mtd_setmulti(sc);
    579 		 error = 0;
    580 	}
    581 
    582 	splx(s);
    583 	return error;
    584 }
    585 
    586 
/*
 * mtd_get:
 *	Copy a received packet of `totlen' bytes from rx buffer `index'
 *	into a freshly allocated mbuf chain.  Returns the chain, or
 *	NULL on allocation failure (anything built so far is freed).
 *	The caller owns the returned chain.
 */
struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	/* The first mbuf carries the packet header */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		/* Attach a cluster when plenty of data remains */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Shift the data pointer so the payload after the
			 * ethernet header ends up ALIGN()ed.
			 */
			char *newdata = (char *)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		/* Copy as much as fits into this mbuf */
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			/* Chain another mbuf for the remainder */
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
    640 
    641 
/*
 * mtd_rxirq:
 *	Receive interrupt handler: walk the rx ring from cur_rx,
 *	passing good packets up the stack and returning errored or
 *	undersized ones to the chip.  Returns 1 (interrupt handled).
 */
int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	/* Process descriptors until we reach one the chip still owns */
	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(&sc->dev, "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length, minus the trailing FCS */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			aprint_error_dev(&sc->dev, "invalid packet size %d; dropping\n",
				len);
			/* Recycle the descriptor and count the error */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Copy the packet out of the rx buffer into an mbuf chain */
		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			/* mbuf allocation failed; count it and move on */
			aprint_error_dev(&sc->dev, "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		/* Hand a copy to any attached BPF listeners */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
    707 
    708 
    709 int
    710 mtd_txirq(struct mtd_softc *sc)
    711 {
    712 	struct ifnet *ifp = &sc->ethercom.ec_if;
    713 
    714 	/* Clear timeout */
    715 	ifp->if_timer = 0;
    716 
    717 	ifp->if_flags &= ~IFF_OACTIVE;
    718 	++ifp->if_opackets;
    719 
    720 	/* XXX FIXME If there is some queued, do an mtd_start? */
    721 
    722 	return 1;
    723 }
    724 
    725 
    726 int
    727 mtd_bufirq(struct mtd_softc *sc)
    728 {
    729 	struct ifnet *ifp = &sc->ethercom.ec_if;
    730 
    731 	/* Clear timeout */
    732 	ifp->if_timer = 0;
    733 
    734 	/* XXX FIXME: Do something here to make sure we get some buffers! */
    735 
    736 	return 1;
    737 }
    738 
    739 
/*
 * mtd_irq_h:
 *	Interrupt handler.  Masks chip interrupts, then loops reading
 *	and acknowledging the status register, dispatching each
 *	condition, until no unmasked bit remains; finally re-enables
 *	interrupts.  Returns nonzero if any interrupt was handled.
 */
int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	/* Ignore stray interrupts while down or inactive */
	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for(;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		/* Acknowledge the conditions we are about to handle */
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(&sc->dev, "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(&sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(&sc->dev, "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if ((status & MTD_ISR_PDF)) {
			aprint_error_dev(&sc->dev, "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(&sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(&sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(&sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(&sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
    828 
    829 
    830 void
    831 mtd_setmulti(struct mtd_softc *sc)
    832 {
    833 	struct ifnet *ifp = &sc->ethercom.ec_if;
    834 	u_int32_t rxtx_stat;
    835 	u_int32_t hash[2] = {0, 0};
    836 	u_int32_t crc;
    837 	struct ether_multi *enm;
    838 	struct ether_multistep step;
    839 	int mcnt = 0;
    840 
    841 	/* Get old status */
    842 	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);
    843 
    844 	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
    845 		rxtx_stat |= MTD_RX_AMULTI;
    846 		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
    847 		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
    848 		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
    849 		return;
    850 	}
    851 
    852 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
    853 	while (enm != NULL) {
    854 		/* We need the 6 most significant bits of the CRC */
    855 		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
    856 
    857 		hash[crc >> 5] |= 1 << (crc & 0xf);
    858 
    859 		++mcnt;
    860 		ETHER_NEXT_MULTI(step, enm);
    861 	}
    862 
    863 	/* Accept multicast bit needs to be on? */
    864 	if (mcnt)
    865 		rxtx_stat |= MTD_RX_AMULTI;
    866 	else
    867 		rxtx_stat &= ~MTD_RX_AMULTI;
    868 
    869 	/* Write out the hash */
    870 	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
    871 	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
    872 	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
    873 }
    874 
    875 
    876 void
    877 mtd_reset(struct mtd_softc *sc)
    878 {
    879 	int i;
    880 
    881 	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);
    882 
    883 	/* Reset descriptor status */
    884 	sc->cur_tx = 0;
    885 	sc->cur_rx = 0;
    886 
    887 	/* Wait until done with reset */
    888 	for (i = 0; i < MTD_TIMEOUT; ++i) {
    889 		DELAY(10);
    890 		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
    891 			break;
    892 	}
    893 
    894 	if (i == MTD_TIMEOUT) {
    895 		aprint_error_dev(&sc->dev, "reset timed out\n");
    896 	}
    897 
    898 	/* Wait a little so chip can stabilize */
    899 	DELAY(1000);
    900 }
    901 
    902 
    903 void
    904 mtd_shutdown (arg)
    905 	void *arg;
    906 {
    907 	struct mtd_softc *sc = arg;
    908 	struct ifnet *ifp = &sc->ethercom.ec_if;
    909 
    910 #if NRND > 0
    911 	rnd_detach_source(&sc->rnd_src);
    912 #endif
    913 	mtd_stop(ifp, 1);
    914 }
    915