/*	$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>
struct le_ioasic_softc {
	struct	am7990_softc sc_am7990;	/* glue to MI code */
	struct	lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_ioasic_softc), le_ioasic_match, le_ioasic_attach
};

static void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

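/*
 * Match the IOASIC-attached LANCE by the module name string the IOASIC
 * bus glue passes in the attach arguments.
 */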
static int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
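/*
 * Both buffer layouts used below (gap2 and gap16) interleave data and
 * pad bytes 1:1, so this 128KB of host memory holds 64KB of packed
 * LANCE buffer data.
 */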

static void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind the 128KB buffer to IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
		TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	        + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
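
/*
 * Layout sketch: data bytes 2k and 2k+1 sit at DMA area offsets 4k
 * and 4k+1; offsets 4k+2 and 4k+3 are pad.  The u_int16_t pointer
 * arithmetic below therefore advances bptr by two 16-bit words for
 * every two data bytes copied.
 */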

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
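
/*
 * Layout sketch: data bytes 16k through 16k+15 occupy the first 16
 * bytes of 32-byte chunk k in the DMA area; the remaining 16 bytes of
 * each chunk are pad.  That is what the address computation
 * bptr = buf + ((boff << 1) & ~0x1f) below implements, with boff & 0xf
 * giving the offset into the chunk.
 */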

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
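		/*
		 * The source is only halfword aligned here, so read it as
		 * one 16-bit, three 32-bit and a final 16-bit load, then
		 * shift and merge the pieces so that every store to the
		 * uncached DMA buffer is an aligned 32-bit write.
		 */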
		do {
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int16_t t0;
			u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t*)from;
			t1 = *(u_int32_t*)(from+2);
			t2 = *(u_int32_t*)(from+6);
			t3 = *(u_int32_t*)(from+10);
			t4 = *(u_int16_t*)(from+14);

			/* DMA buffer is uncached on mips */
			dst[0] =         t0 |  (t1 << 16);
			dst[1] = (t1 >> 16) |  (t2 << 16);
			dst[2] = (t2 >> 16) |  (t3 << 16);
			dst[3] = (t3 >> 16) |  (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		case 0:
		do {
			u_int32_t *src = (u_int32_t*)from;
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
		}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so the source of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
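		/*
		 * Inverse of the copyto case above: four aligned 32-bit
		 * reads from the uncached DMA buffer are split back into
		 * one 16-bit, three 32-bit and a final 16-bit store to the
		 * halfword-aligned destination.
		 */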
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *) (to+0)  = (u_short)  t0;
			*(u_int32_t *) (to+2)  = (t0 >> 16) |  (t1 << 16);
			*(u_int32_t *) (to+6)  = (t1 >> 16) |  (t2 << 16);
			*(u_int32_t *) (to+10) = (t2 >> 16) |  (t3 << 16);
			*(u_int16_t *) (to+14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy. Rare. */
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t *dst = (u_int32_t*)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

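/*
 * Zero len bytes of buffer space starting at data offset boff, walking
 * the gap16 chunks and clearing only the 16 data bytes of each 32-byte
 * chunk.
 */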
void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}