/* $NetBSD: mtd803.c,v 1.41 2020/01/29 15:00:39 thorpej Exp $ */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex@student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, get some bus_dmamap_syncs in the correct places.
 *    I don't have access to a non-i386 machine with PCI, and on i386 the
 *    dmamap_syncs are no-ops.  (Untested sketches of the missing syncs
 *    appear after mtd_init_desc(), mtd_put() and mtd_rxirq() below.)
 * - Add a power hook so the card is reinitialised when resuming after
 *    standby.  (A pmf(9) sketch appears after mtd_config() below.)
 * - The watchdog code doesn't work yet; the system crashes.
 * - There seems to be a CardBus version of the card (see the datasheet).
 *    That would probably require a detach function (free buffers, stop
 *    rx/tx, etc.); a sketch appears at the end of this file.
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it gets
 *    raised every time a packet is sent, which is strange since everything
 *    works anyway.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.41 2020/01/29 15:00:39 thorpej Exp $");

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex@student.kun.nl)
 *
 * Datasheet at:   http://www.myson.com.tw   or   http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg(device_t, int, int, uint16_t *);
int mtd_mii_writereg(device_t, int, int, uint16_t);
void mtd_mii_statchg(struct ifnet *);

void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);


int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mii_data *mii = &sc->mii;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	mii->mii_ifp = ifp;
	mii->mii_readreg = mtd_mii_readreg;
	mii->mii_writereg = mtd_mii_writereg;
	mii->mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	mii_attach(sc->dev, mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(sc->dev, "Unable to configure MII\n");
		return 1;
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(sc->dev),
			  RND_TYPE_NET, RND_FLAG_DEFAULT);

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
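
/*
 * TODO sketch (untested): the power hook asked for in the TODO at the top
 * of this file could be provided through pmf(9).  A minimal sketch, assuming
 * reprogramming the chip via mtd_init() is all the state restoration needed:
 *
 *	static bool
 *	mtd_resume(device_t self, const pmf_qual_t *qual)
 *	{
 *		struct mtd_softc *sc = device_private(self);
 *		struct ifnet *ifp = &sc->ethercom.ec_if;
 *		int s;
 *
 *		if (ifp->if_flags & IFF_UP) {
 *			s = splnet();
 *			mtd_init(ifp);
 *			splx(s);
 *		}
 *		return true;
 *	}
 *
 * registered from mtd_config() with:
 *
 *	pmf_device_register(sc->dev, NULL, mtd_resume);
 */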


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}


int
mtd_init_desc(struct mtd_softc *sc)
{
	int rseg, drseg, err, i;
	bus_dma_segment_t seg, dseg;
	bus_size_t size, dsize;

	/*
	 * The descriptor region (dseg/dsize) and the buffer region
	 * (seg/size) are tracked separately, so the error paths below
	 * release each region with its own segment and size.
	 */

	/* Allocate memory for descriptors */
	dsize = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, dsize, MTD_DMA_ALIGN,
			 0, &dseg, 1, &drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &dseg, 1, dsize,
		(void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, dsize, 1,
		dsize, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
		dsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Allocate memory for the buffers */
	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
			 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
		&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
		size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
		size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descriptor */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + MTD_NUM_RXD * MTD_RXBUF_SIZE
		    + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}
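
/*
 * TODO sketch (untested): the bus_dmamap_sync calls asked for in the TODO
 * at the top of this file would go roughly like this.  After the rings have
 * been filled in above, and before the chip is told about them in
 * mtd_init(), something like:
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map, 0,
 *	    sc->desc_dma_map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *
 * would flush the CPU's view of the rings to memory the chip can see.
 */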


void
mtd_mii_statchg(struct ifnet *ifp)
{
	/* Should we do something here? :) */
}


int
mtd_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct mtd_softc *sc = device_private(self);

	*val = MTD_READ_2(sc, MTD_PHYBASE + reg * 2);

	return 0;
}


int
mtd_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);

	return 0;
}


int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
			+ index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			n = m_free(m);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/*
			 * XXX Check before the copy, so an oversized chain
			 * cannot overrun the tx buffer; drop the rest.
			 */
			aprint_error_dev(sc->dev,
			    "packet too large! Size = %i\n", tlen + len);
			n = m_free(m);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		n = m_free(m);
	}
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
		| MTD_TXD_CONF_IRQC
		| ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
		| (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}
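
/*
 * TODO sketch (untested): per the TODO at the top of the file, the tx side
 * would want a sync after the packet has been copied into the DMA buffer
 * above and before the descriptor is handed to the chip in mtd_start(),
 * along the lines of:
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map,
 *	    MTD_NUM_RXD * MTD_RXBUF_SIZE + index * MTD_TXBUF_SIZE,
 *	    tlen, BUS_DMASYNC_PREWRITE);
 */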


void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Copy mbuf chain into tx buffer */
		(void)mtd_put(sc, sc->cur_tx, m);

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0)
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf
		    |= MTD_TXD_CONF_LSD;
	else
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf
		    |= MTD_TXD_CONF_LSD;

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}


void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must do more at disable?? ... */
	if (disable) {
		/* Clear tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}


void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->dev));
	if_statinc(ifp, if_oerrors);

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);
}


int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}


struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			char *newdata = (char *)
				ALIGN(m->m_data + sizeof(struct ether_header)) -
				sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = uimin(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}


int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(sc->dev,
			    "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			if_statinc(ifp, if_ierrors);
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
			>> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			aprint_error_dev(sc->dev,
			    "invalid packet size %d; dropping\n", len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
							MTD_RXD_CONF_BUFS;
			if_statinc(ifp, if_ierrors);
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, sc->cur_rx, len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(sc->dev,
			    "error pulling packet off interface\n");
			if_statinc(ifp, if_ierrors);
			continue;
		}

		/* Pass the packet up */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	return 1;
}
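
/*
 * TODO sketch (untested): on the rx side, the TODO at the top of the file
 * suggests the descriptor should be synced before its status is inspected
 * and the packet buffer synced before mtd_get() copies it out, e.g.:
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    sc->cur_rx * sizeof(struct mtd_desc), sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map,
 *	    sc->cur_rx * MTD_RXBUF_SIZE, MTD_RXBUF_SIZE,
 *	    BUS_DMASYNC_POSTREAD);
 */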


int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	if_statinc(ifp, if_opackets);

	/* XXX FIXME: If anything is still queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}


int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	uint32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);

		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);

		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(sc->dev,
			    "receive buffer unavailable\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(sc->dev, "receive error\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(sc->dev,
			    "transmit buffer unavailable\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_PDF) {
			aprint_error_dev(sc->dev,
			    "parallel detection fault\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(sc->dev, "fatal bus error\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(sc->dev, "target error\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(sc->dev, "master error\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(sc->dev, "parity error\n");
			if_statinc(ifp, if_ierrors);
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}


void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ethercom *ec = &sc->ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t rxtx_stat;
	uint32_t hash[2] = {0, 0};
	uint32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/*
		 * Bit 5 of the index selects the register; bits 4-0 select
		 * the bit within it, so mask with 0x1f (0xf would lose the
		 * top bit of the 32-bit registers).
		 */
		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}
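
/*
 * Worked example of the hash indexing above: for a multicast address whose
 * big-endian CRC32 is 0xb6xxxxxx, the top 6 bits give crc = 0x2d (101101b),
 * so crc >> 5 = 1 selects MAR1 and crc & 0x1f = 13 selects bit 13 in it.
 */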


void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT)
		aprint_error_dev(sc->dev, "reset timed out\n");

	/* Wait a little so the chip can stabilize */
	DELAY(1000);
}


void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

	rnd_detach_source(&sc->rnd_src);
	mtd_stop(ifp, 1);
}
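
/*
 * TODO sketch (untested): if the CardBus variant mentioned in the TODO at
 * the top of this file is ever supported, a detach function would have to
 * undo mtd_config(), roughly:
 *
 *	int
 *	mtd_detach(struct mtd_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->ethercom.ec_if;
 *
 *		mtd_stop(ifp, 1);
 *		shutdownhook_disestablish(sc->sd_hook);
 *		rnd_detach_source(&sc->rnd_src);
 *		ether_ifdetach(ifp);
 *		if_detach(ifp);
 *		mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
 *		return 0;
 *	}
 *
 * plus unloading, destroying, unmapping and freeing the descriptor and
 * buffer DMA resources set up in mtd_init_desc().
 */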
    903