/* $NetBSD: mtd803.c,v 1.5 2003/11/02 11:07:45 wiz Exp $ */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex@student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, add bus_dmamap_sync calls in the correct places
 *    (a rough sketch follows below).  I only have access to i386 PCI
 *    machines, and on i386 the syncs happen to be no-ops, so I cannot
 *    verify them.
 * - Add a powerhook so the chip is reinitialized when resuming after standby.
 * - The watchdog doesn't work yet; the system crashes (lockmgr: no context).
 * - There seems to be a CardBus version of the card (see datasheet).
 *    A detach function is probably needed then (free buffers, stop rx/tx, etc.).
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it gets
 *    raised every time a packet is sent.  Strange, since everything works anyway.
 */
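
/*
 * A rough, untested sketch of where the missing syncs might go, written
 * against the maps created in mtd_init_desc() below.  The offsets, lengths
 * and sync flags are illustrative assumptions only, not verified on
 * DMA-incoherent hardware:
 *
 *	// before handing a tx descriptor to the chip (mtd_start):
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map,
 *	    MTD_NUM_RXD * MTD_RXBUF_SIZE + index * MTD_TXBUF_SIZE,
 *	    MTD_TXBUF_SIZE, BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    (MTD_NUM_RXD + index) * sizeof(struct mtd_desc),
 *	    sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *
 *	// after the chip hands an rx descriptor back (mtd_rxirq):
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    sc->cur_rx * sizeof(struct mtd_desc),
 *	    sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map,
 *	    sc->cur_rx * MTD_RXBUF_SIZE, MTD_RXBUF_SIZE,
 *	    BUS_DMASYNC_POSTREAD);
 */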

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.5 2003/11/02 11:07:45 wiz Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex@student.kun.nl)
 *
 * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg __P((struct device *, int, int));
void mtd_mii_writereg __P((struct device *, int, int, int));
void mtd_mii_statchg __P((struct device *));

void mtd_start __P((struct ifnet *));
void mtd_stop __P((struct ifnet *, int));
int mtd_ioctl __P((struct ifnet *, u_long, caddr_t));
void mtd_setmulti __P((struct mtd_softc *));
void mtd_watchdog __P((struct ifnet *));
int mtd_mediachange __P((struct ifnet *));
void mtd_mediastatus __P((struct ifnet *, struct ifmediareq *));

int mtd_init __P((struct ifnet *));
void mtd_reset __P((struct mtd_softc *));
void mtd_shutdown __P((void *));
int mtd_init_desc __P((struct mtd_softc *));
int mtd_put __P((struct mtd_softc *, int, struct mbuf *));
struct mbuf *mtd_get __P((struct mtd_softc *, int, int));

int mtd_rxirq __P((struct mtd_softc *));
int mtd_txirq __P((struct mtd_softc *));
int mtd_bufirq __P((struct mtd_softc *));


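/*
 * mtd_config
 * Shared attach routine, presumably called by the bus front-end once the
 * chip's registers have been mapped: read the station address, set up the
 * ifnet and MII glue, allocate the descriptor/buffer memory and attach
 * the interface.
 */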
int
mtd_config(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	ifmedia_init(&sc->mii.mii_media, 0, mtd_mediachange, mtd_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}


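/*
 * mtd_init_desc
 * Set up the DMA memory for the descriptor rings and the packet buffers.
 * One contiguous DMA area holds MTD_NUM_RXD receive descriptors followed by
 * MTD_NUM_TXD transmit descriptors, each ring linked circularly through the
 * descriptors' 'next' fields.  A second DMA area holds the fixed-size packet
 * buffers in the same order (MTD_NUM_RXD buffers of MTD_RXBUF_SIZE, then
 * MTD_NUM_TXD buffers of MTD_TXBUF_SIZE), so descriptor i always points at
 * buffer i.
 */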
int
mtd_init_desc(sc)
	struct mtd_softc *sc;
{
	int rseg, brseg, err, i;
	bus_dma_segment_t seg, bseg;
	bus_size_t size, bsize;

	/* Allocate memory for descriptors */
	size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
		(caddr_t *)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
		size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
		size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/*
	 * Allocate memory for the buffers.  Keep the descriptor allocation
	 * (seg/rseg/size) intact so the error paths below can undo it with
	 * the correct segments and length.
	 */
	bsize = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, bsize, MTD_DMA_ALIGN,
			 0, &bseg, 1, &brseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &bseg, 1, bsize,
		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, bsize, 1,
		bsize, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, bsize);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
		bsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
			sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, bsize);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+ (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
					+ i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+ MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
				htole32(sc->desc_dma_map->dm_segs[0].ds_addr
					+ (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
					+ MTD_NUM_RXD * MTD_RXBUF_SIZE
					+ (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}


void
mtd_mii_statchg(self)
	struct device *self;
{
	/*struct mtd_softc *sc = (void *)self;*/

	/* Should we do something here? :) */
}


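/*
 * MII read/write accessors.  The MTD803 appears to expose its internal
 * PHY's registers directly in the chip's register space at MTD_PHYBASE
 * (one 16-bit register every two bytes), which is presumably why the
 * 'phy' argument is ignored here.
 */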
int
mtd_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct mtd_softc *sc = (void *)self;

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}


void
mtd_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct mtd_softc *sc = (void *)self;

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}


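/*
 * mtd_put
 * Copy an mbuf chain into the fixed-size tx buffer belonging to descriptor
 * 'index' and fill in that descriptor's conf word (padding, CRC, interrupt
 * on completion, frame and buffer length).  The chain is freed as it is
 * copied.  Returns the total number of bytes copied.
 */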
int
mtd_put(sc, index, m)
	struct mtd_softc *sc;
	int index;
	struct mbuf *m;
{
	int len, tlen;
	caddr_t buf = sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
			+ index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/* Would overrun the tx buffer; drop this mbuf. */
			/* XXX FIXME: No idea what to do here. */
			printf("%s: packet too large! Size = %i\n",
				sc->dev.dv_xname, tlen + len);
			MFREE(m, n);
			continue;
		}
		memcpy(buf, mtod(m, caddr_t), len);
		buf += len;
		tlen += len;
		MFREE(m, n);
	}
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
		| MTD_TXD_CONF_IRQC
		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
		| (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}


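/*
 * mtd_start
 * Drain the interface's send queue into the tx ring.  Each packet is copied
 * into its tx buffer by mtd_put(); the first descriptor of the batch is
 * handed to the chip last (by setting MTD_TXD_OWNER) so the whole chain is
 * consistent before the transmit polling demand is issued.
 */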
void
mtd_start(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Nothing to do if the send queue is empty */
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |= MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}


void
mtd_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Is there more to do when fully disabling the chip? */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}


void
mtd_watchdog(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);

	return;
}


int
mtd_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	/* Don't do anything special */
	switch (cmd) {
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			error = (cmd == SIOCADDMULTI) ?
			    ether_addmulti(ifr, &sc->ethercom) :
			    ether_delmulti(ifr, &sc->ethercom);

			if (error == ENETRESET) {
				/*
				 * Multicast list has changed; set the hardware
				 * filter accordingly.
				 */
				mtd_setmulti(sc);
				error = 0;
			}
			break;

		default:
			error = ether_ioctl(ifp, cmd, data);
			break;
	}

	splx(s);
	return error;
}


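/*
 * mtd_get
 * Copy a received frame of 'totlen' bytes out of rx buffer 'index' into a
 * freshly allocated mbuf chain, using clusters for large frames.  Returns
 * NULL if mbuf allocation fails.
 */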
struct mbuf *
mtd_get(sc, index, totlen)
	struct mtd_softc *sc;
	int index;
	int totlen;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	caddr_t buf = sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			caddr_t newdata = (caddr_t)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, caddr_t), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}


int
mtd_rxirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
				sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
				sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			printf("%s: error pulling packet off interface\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}


int
mtd_txirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME If there is some queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}


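/*
 * mtd_irq_h
 * Interrupt handler.  Interrupts are masked while the handler runs; the
 * status register is read and acknowledged in a loop until no interesting
 * bits remain, dispatching to the rx/tx/buffer handlers above.  Returns
 * nonzero if the interrupt was ours.
 */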
int
mtd_irq_h(args)
	void *args;
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) ||
		!(sc->dev.dv_flags & DVF_ACTIVE))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			printf("%s: parallel detection fault\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
				sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}


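/*
 * mtd_setmulti
 * Program the 64-bit multicast hash filter.  The upper six bits of the
 * big-endian CRC32 of each multicast address select a bit in MAR0/MAR1:
 * bit 5 picks the register, bits 0-4 the bit position within it.
 */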
void
mtd_setmulti(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* Bit 5 selects the register, bits 0-4 the bit within it */
		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}


void
mtd_reset(sc)
	struct mtd_softc *sc;
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT) {
		printf("%s: reset timed out\n", sc->dev.dv_xname);
	}

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}


int
mtd_mediachange(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->mii.mii_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	return mii_mediachg(&sc->mii);
}


void
mtd_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct mtd_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->mii);
	ifmr->ifm_active = sc->mii.mii_media_active;
	ifmr->ifm_status = sc->mii.mii_media_status;
}


void
mtd_shutdown(arg)
	void *arg;
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

#if NRND > 0
	rnd_detach_source(&sc->rnd_src);
#endif
	mtd_stop(ifp, 1);
}