/* $NetBSD: mtd803.c,v 1.1 2002/11/07 21:56:59 martin Exp $ */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex@student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, get some bus_dmamap_syncs in the correct places.
 *    I only have access to i386 PCI machines, and on i386 bus_dmamap_sync
 *    is a no-op, so I cannot test this.
 * - Powerhook for resuming after standby.
 * - Watchdog stuff doesn't work yet; the system crashes (lockmgr: no context).
 * - There seems to be a CardBus version of the card (see datasheet).
 *    Perhaps a detach function is needed then? (free buffers, stop rx/tx, etc.)
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it gets
 *    raised every time a packet is sent, even though everything works anyway.
 */
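
/*
 * A possible (untested) sketch for the missing syncs above: on platforms
 * where bus_dmamap_sync() is not a no-op, the descriptor map would typically
 * be synced around each hand-off to or from the chip, e.g.
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    i * sizeof(struct mtd_desc), sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *
 * before giving descriptor i to the chip, and with the POSTREAD/POSTWRITE
 * ops after the chip hands it back; the rx/tx data buffers would need the
 * same treatment through buf_dma_map.
 */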

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex@student.kun.nl)
 *
 * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg __P((struct device *, int, int));
void mtd_mii_writereg __P((struct device *, int, int, int));
void mtd_mii_statchg __P((struct device *));

void mtd_start __P((struct ifnet *));
void mtd_stop __P((struct ifnet *, int));
int mtd_ioctl __P((struct ifnet *, u_long, caddr_t));
void mtd_setmulti __P((struct mtd_softc *));
void mtd_watchdog __P((struct ifnet *));
int mtd_mediachange __P((struct ifnet *));
void mtd_mediastatus __P((struct ifnet *, struct ifmediareq *));

int mtd_init __P((struct ifnet *));
void mtd_reset __P((struct mtd_softc *));
void mtd_shutdown __P((void *));
int mtd_init_desc __P((struct mtd_softc *));
int mtd_put __P((struct mtd_softc *, int, struct mbuf *));
struct mbuf *mtd_get __P((struct mtd_softc *, int, int));

int mtd_rxirq __P((struct mtd_softc *));
int mtd_txirq __P((struct mtd_softc *));
int mtd_bufirq __P((struct mtd_softc *));


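/*
 * mtd_config
 * Attach-time configuration: read the station address from the chip, set up
 * the ifnet and MII structures, allocate the DMA descriptors and buffers,
 * and attach the interface.
 */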
int
mtd_config(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	ifmedia_init(&sc->mii.mii_media, 0, mtd_mediachange, mtd_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_RX_BLEN | MTD_RX_512
			| MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}


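/*
 * mtd_init_desc
 * Allocate one DMA-safe region for the descriptors (MTD_NUM_RXD rx
 * descriptors followed by MTD_NUM_TXD tx descriptors) and one for the
 * packet buffers, then link the descriptors into two circular lists.
 */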
int
mtd_init_desc(sc)
	struct mtd_softc *sc;
{
	int rseg, desc_rseg, err, i;
	bus_dma_segment_t seg, desc_seg;
	bus_size_t size, desc_size;

	/* Allocate memory for descriptors */
	size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
		(caddr_t *)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
		size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
		size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/*
	 * Remember the descriptor region before seg/rseg/size are reused for
	 * the packet buffers, so the error paths below can undo the
	 * descriptor setup with the correct size and segment.
	 */
	desc_size = size;
	desc_seg = seg;
	desc_rseg = rseg;

	/* Allocate memory for the buffers */
	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
		size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
		size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+ (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
					+ i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+ (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
					+ MTD_NUM_RXD * MTD_RXBUF_SIZE
					+ (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}


void
mtd_mii_statchg(self)
	struct device *self;
{
	/*struct mtd_softc *sc = (void *)self;*/

	/* Should we do something here? :) */
}


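/*
 * MII register access. The PHY registers are reached through the
 * controller's own register window starting at MTD_PHYBASE, so the phy
 * number argument is not used here.
 */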
int
mtd_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct mtd_softc *sc = (void *)self;

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}


void
mtd_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct mtd_softc *sc = (void *)self;

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}


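/*
 * mtd_put
 * Copy an mbuf chain into the tx bounce buffer that belongs to descriptor
 * `index', free the chain, and fill in the descriptor's config word.
 * Returns the number of bytes copied.
 */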
int
mtd_put(sc, index, m)
	struct mtd_softc *sc;
	int index;
	struct mbuf *m;
{
	int len, tlen;
	caddr_t buf = sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
			+ index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/* XXX: don't overrun the tx buffer; drop the rest. */
			printf("%s: packet too large!\n",
				sc->dev.dv_xname);
			MFREE(m, n);
			continue;
		}
		memcpy(buf, mtod(m, caddr_t), len);
		buf += len;
		tlen += len;
		MFREE(m, n);
	}
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
		| MTD_TXD_CONF_IRQC
		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
		| (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}


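/*
 * mtd_start
 * Drain the interface output queue: copy each packet into the next free
 * tx descriptor's buffer, chain the descriptors together, then hand the
 * first one to the chip and issue a transmit poll demand.
 */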
void
mtd_start(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;
	int queued = 0;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);
		queued = 1;

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}

	/* Nothing was dequeued, so don't hand a stale descriptor to the chip */
	if (!queued)
		return;

	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}


void
mtd_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Is more needed when fully disabling?... */
	if (disable) {
		/* Clear tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}


void
mtd_watchdog(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);

	return;
}


int
mtd_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	/* Don't do anything special */
	switch (cmd) {
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			error = (cmd == SIOCADDMULTI) ?
			    ether_addmulti(ifr, &sc->ethercom) :
			    ether_delmulti(ifr, &sc->ethercom);

			if (error == ENETRESET) {
				/*
				 * Multicast list has changed; set the hardware
				 * filter accordingly.
				 */
				mtd_setmulti(sc);
				error = 0;
			}
			break;

		default:
			error = ether_ioctl(ifp, cmd, data);
			break;
	}

	splx(s);
	return error;
}


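/*
 * mtd_get
 * Copy a received frame of `totlen' bytes out of the rx bounce buffer that
 * belongs to descriptor `index' into a freshly allocated mbuf chain.
 * Returns NULL if no mbufs (or clusters) are available.
 */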
struct mbuf *
mtd_get(sc, index, totlen)
	struct mtd_softc *sc;
	int index;
	int totlen;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	caddr_t buf = sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			caddr_t newdata = (caddr_t)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, caddr_t), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}


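/*
 * mtd_rxirq
 * Drain every rx descriptor the chip has handed back: drop frames with the
 * error summary bit set or a bogus length, pass the rest up the stack, and
 * return the descriptors to the chip.
 */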
int
mtd_rxirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
				sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
				sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			printf("%s: error pulling packet off interface\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}


int
mtd_txirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME: If there is something queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}


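/*
 * mtd_irq_h
 * Interrupt handler. Keeps reading and acknowledging the interrupt status
 * register until no interesting bits are left, dispatching to the rx/tx
 * handlers as needed. Returns nonzero if any rx/tx work was handled.
 */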
int
mtd_irq_h(args)
	void *args;
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) ||
		!(sc->dev.dv_flags & DVF_ACTIVE))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			printf("%s: parallel detection fault\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}


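/*
 * mtd_setmulti
 * Program the 64-bit multicast hash filter (MAR0/MAR1). Each multicast
 * address is hashed with the big-endian CRC-32; the top 6 bits of the CRC
 * select the filter bit to set. ALLMULTI/PROMISC simply accept everything.
 */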
void
mtd_setmulti(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* The top bit selects MAR0/MAR1, the rest the bit within */
		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}


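/*
 * mtd_reset
 * Soft-reset the chip via the bus command register and wait (bounded by
 * MTD_TIMEOUT) for the reset bit to clear.
 */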
void
mtd_reset(sc)
	struct mtd_softc *sc;
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT) {
		printf("%s: reset timed out\n", sc->dev.dv_xname);
	}

	/* Wait a little so the chip can stabilize */
	DELAY(1000);
}


int
mtd_mediachange(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->mii.mii_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	return mii_mediachg(&sc->mii);
}


void
mtd_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct mtd_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->mii);
	ifmr->ifm_active = sc->mii.mii_media_active;
	ifmr->ifm_status = sc->mii.mii_media_status;
}


void
mtd_shutdown(arg)
	void *arg;
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

#if NRND > 0
	rnd_detach_source(&sc->rnd_src);
#endif
	mtd_stop(ifp, 1);
}