/*	$NetBSD: if_le_ioasic.c,v 1.14 1999/10/01 09:19:42 nisimura Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.14 1999/10/01 09:19:42 nisimura Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ddb.h"
#endif

caddr_t le_iomem;

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

static void ioasic_lance_dma_setup __P((struct device *));
static char *ioasic_lance_ether_address __P((void));

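/*
 * With DDB configured, keep the copy helpers below global and
 * out-of-line so the debugger can find them; otherwise let the
 * compiler make them static (and, where marked, inline).
 */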
#ifdef DDB
#define	integrate
#define hide
#else
#define	integrate	static __inline
#define hide		static
#endif

hide void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));

hide void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (!ioasic_submatch(match, aux))
		return (0);
	if (strncmp("lance", d->iada_modname, TC_ROM_LLEN))
		return (0);

	return (1);
}

void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;
	register struct le_softc *lesc = (void *)self;
	register struct lance_softc *sc = &lesc->sc_am7990.lsc;

	ioasic_lance_dma_setup(parent);

	if (le_iomem == 0) {
		printf("%s: DMA area not set up\n", sc->sc_dev.dv_xname);
		return;
	}

	lesc->sc_r1 = (struct lereg1 *)
		TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	sc->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);

	sc->sc_copytodesc = le_ioasic_copytobuf_gap2;
	sc->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	sc->sc_copytobuf = le_ioasic_copytobuf_gap16;
	sc->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	sc->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&lesc->sc_am7990, ioasic_lance_ether_address());

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * clobbering an extra (pad) byte when a copy ends on an odd length.
 */
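
/*
 * A sketch of the gap2 layout (an illustration of the comment above):
 * each 16-bit word of data is followed by a 16-bit pad word, so the
 * buffer viewed as a u_int16_t array looks like
 *
 *	index:	  0       1     2       3
 *		| d0 d1 | pad | d2 d3 | pad | ...
 *
 * and data byte offset boff lives in word (boff & ~1), which is why
 * the copy routines below index the buffer with boff directly.
 */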

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register volatile u_int16_t *bptr;
	register u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
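
/*
 * A sketch of the resulting address arithmetic (an illustration, not
 * new behavior): data byte offset boff lives in the 32-byte chunk
 * numbered boff / 16, at offset boff % 16 within it, so
 *
 *	chunk base    = (boff << 1) & ~0x1f	(== (boff / 16) * 32)
 *	byte in chunk = boff & 0xf
 *
 * which is exactly how bptr and boff are computed in the three
 * gap16 routines below.
 */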

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
		do {
			register u_int32_t *dst = (u_int32_t *)bptr;
			register u_int16_t t0;
			register u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t *)from;
			t1 = *(u_int32_t *)(from + 2);
			t2 = *(u_int32_t *)(from + 6);
			t3 = *(u_int32_t *)(from + 10);
			t4 = *(u_int16_t *)(from + 14);

			/* DMA buffer is uncached on mips */
			dst[0] =         t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			register u_int32_t *src = (u_int32_t *)from;
			register u_int32_t *dst = (u_int32_t *)bptr;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff so source of subsequent copies is 16-byte aligned. */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			register u_int32_t *src = (u_int32_t *)bptr;
			register u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *)(to + 0)  = (u_short)t0;
			*(u_int32_t *)(to + 2)  = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *)(to + 6)  = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *)(to + 14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			register u_int32_t *src = (u_int32_t *)bptr;
			register u_int32_t *dst = (u_int32_t *)to;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
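	/*
	 * The unrolled loops above run while len > 16, so they can
	 * leave up to 16 bytes behind; that remainder still fits in
	 * the current 16-byte data chunk, so a plain bcopy finishes.
	 */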
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
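
/*
 * Added note: both the gap2 and gap16 layouts store data at half
 * density, so this 128 KB host area presents 64 KB of contiguous
 * buffer space to the chip.  Aligning the area to its own size also
 * keeps it from crossing a 128 KB boundary, which presumably matters
 * to the IOASIC's DMA address decoding.
 */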

void
ioasic_lance_dma_setup(parent)
	struct device *parent;
{
	struct ioasic_softc *sc = (void *)parent;
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;

	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't allocate DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("%s: can't map DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}

	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_lance_dmam)) {
		printf("%s: can't create DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_lance_dmam,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}

	tca = (tc_addr_t)sc->sc_lance_dmam->dm_segs[0].ds_addr;
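	/*
	 * Program the DMA base address into the IOASIC.  As the shift
	 * arithmetic below implies, the DMAPTR register wants the
	 * address rotated left by 3 bits: address bits <28:2> land in
	 * DMAPTR<31:5> and bits <31:29> wrap around into DMAPTR<2:0>.
	 */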
	tca = ((tca << 3) & ~0x1f) | ((tca >> 29) & 0x1f);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(sc->sc_bst, sc->sc_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, IOASIC_CSR, ssr);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
	le_iomem = 0;
}

/* XXX */
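/*
 * Added note: the Ethernet station address is read here straight out
 * of IOASIC slot 2 via the global ioasic_base rather than through a
 * proper bus-space mapping; presumably that shortcut is what the XXX
 * above refers to.
 */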
char *
ioasic_lance_ether_address()
{

	return (char *)(ioasic_base + IOASIC_SLOT_2_START);
}