/*	$NetBSD: if_le_ioasic.c,v 1.13 1999/09/09 06:33:38 nisimura Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.13 1999/09/09 06:33:38 nisimura Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ddb.h"
#endif

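/*
 * DMA buffer shared between the LANCE and the host.  It is allocated
 * and mapped by ioasic_lance_dma_setup() and stays zero if that setup
 * fails, which the attach routine checks for.
 */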
caddr_t le_iomem;

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

static void ioasic_lance_dma_setup __P((struct device *));

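/*
 * When DDB is configured these expand to nothing, presumably so that
 * the routines below remain global and visible to the kernel debugger;
 * otherwise they are hidden as static.
 */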
#ifdef DDB
#define	integrate
#define hide
#else
#define	integrate	static __inline
#define hide		static
#endif

hide void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));

hide void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (!ioasic_submatch(match, aux))
		return (0);
	if (strncmp("lance", d->iada_modname, TC_ROM_LLEN))
		return (0);

	return (1);
}

void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;
	register struct le_softc *lesc = (void *)self;
	register struct lance_softc *sc = &lesc->sc_am7990.lsc;

	ioasic_lance_dma_setup(parent);

	if (le_iomem == 0) {
		printf("%s: DMA area not set up\n", sc->sc_dev.dv_xname);
		return;
	}

	lesc->sc_r1 = (struct lereg1 *)
		TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	sc->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);

	sc->sc_copytodesc = le_ioasic_copytobuf_gap2;
	sc->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	sc->sc_copytobuf = le_ioasic_copytobuf_gap16;
	sc->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	sc->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&lesc->sc_am7990, ioasic_lance_ether_address());

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * copying an extra byte at the end when the length is odd.
 */
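/*
 * Layout sketch: each 16-bit word of LANCE data is followed by a
 * 16-bit pad word in the DMA area, so LANCE bytes 2n and 2n+1 live
 * in the 16-bit word at word index 2n (host byte offset 4n), low
 * byte first:
 *
 *	word 0: data[0] data[1]
 *	word 1: pad
 *	word 2: data[2] data[3]
 *	word 3: pad
 *	...
 */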

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register volatile u_int16_t *bptr;
	register u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
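/*
 * Address sketch: LANCE byte offset `boff' selects the 32-byte chunk
 * at host offset ((boff << 1) & ~0x1f) plus the byte (boff & 0xf)
 * within that chunk's 16 data bytes; the remaining 16 bytes of each
 * chunk are pad, so the host pointer advances by 32 for every 16
 * bytes of LANCE data.
 */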

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) -1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
		do {
			register u_int32_t *dst = (u_int32_t*)bptr;
			register u_int16_t t0;
			register u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t*)from;
			t1 = *(u_int32_t*)(from+2);
			t2 = *(u_int32_t*)(from+6);
			t3 = *(u_int32_t*)(from+10);
			t4 = *(u_int16_t*)(from+14);

			/* DMA buffer is uncached on mips */
			dst[0] =         t0 |  (t1 << 16);
			dst[1] = (t1 >> 16) |  (t2 << 16);
			dst[2] = (t2 >> 16) |  (t3 << 16);
			dst[3] = (t3 >> 16) |  (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		case 0:
		do {
			register u_int32_t *src = (u_int32_t*)from;
			register u_int32_t *dst = (u_int32_t*)bptr;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff; source of subsequent copies is 16-byte aligned. */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) -1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			register u_int32_t *src = (u_int32_t*)bptr;
			register u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *) (to+0)  = (u_short)  t0;
			*(u_int32_t *) (to+2)  = (t0 >> 16) |  (t1 << 16);
			*(u_int32_t *) (to+6)  = (t1 >> 16) |  (t2 << 16);
			*(u_int32_t *) (to+10) = (t2 >> 16) |  (t3 << 16);
			*(u_int16_t *) (to+14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy. Rare. */
		do {
			register u_int32_t *src = (u_int32_t*)bptr;
			register u_int32_t *dst = (u_int32_t*)to;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)

void
ioasic_lance_dma_setup(parent)
	struct device *parent;
{
	struct ioasic_softc *sc = (void *)parent;
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	int rseg;

	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't allocate DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("%s: can't map DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}

	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_lance_dmam)) {
		printf("%s: can't create DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_lance_dmam,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}

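	/*
	 * Point the IOASIC at the buffer.  The LANCE DMA pointer
	 * register takes the DMA address in a rotated encoding:
	 * shifted left by 3 with the high-order address bits folded
	 * into the low 5 bits, which is what the expression below
	 * computes from the loaded segment address.
	 */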
	tca = (tc_addr_t)sc->sc_lance_dmam->dm_segs[0].ds_addr;
	*(u_int32_t *)(ioasic_base + IOASIC_LANCE_DMAPTR)
		= ((tca << 3) & ~(tc_addr_t)0x1f) | ((tca >> 29) & 0x1f);
	tc_wmb();

	*(u_int32_t *)(ioasic_base + IOASIC_CSR) |= IOASIC_CSR_DMAEN_LANCE;
	tc_wmb();
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
	le_iomem = 0;
}
    464