/*	$NetBSD: if_le_ioasic.c,v 1.16 2000/07/11 04:10:25 nisimura Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.16 2000/07/11 04:10:25 nisimura Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

static void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
		int, int));
static void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
		int, int));
static void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
		int, int));
static void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
		int, int));
static void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));
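
/*
 * The gap2 routines are hooked up as the descriptor copy functions
 * and the gap16 routines as the packet buffer copy functions; see
 * the sc_copy* assignments in le_ioasic_attach() below.
 */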

static int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)

static void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = (tc_addr_t)sc->sc_dmamap->dm_segs[0].ds_addr;
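	/*
	 * The next line effectively rotates the DMA base address
	 * left by 3 bits into the format the IOASIC_LANCE_DMAPTR
	 * register takes; the 128KB alignment of the buffer
	 * guarantees the low bits masked off by ~0x1f are zero.
	 */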
	tca = ((tca << 3) & ~0x1f) | ((tca >> 29) & 0x1f);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

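	/*
	 * The LANCE registers are reached through a TC sparse-space
	 * mapping and the DMA buffer through an uncached mapping, so
	 * CPU accesses to the buffer stay coherent with LANCE DMA.
	 */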
	sc->sc_r1 = (struct lereg1 *)
		TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * reading or writing one extra byte past the requested length.
 */
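
/*
 * A worked example of the gap2 layout: logical data bytes 0,1 live at
 * buffer offsets 0,1, bytes 2,3 at offsets 4,5, bytes 4,5 at offsets
 * 8,9, and so on.  Logical byte "boff" therefore lives in the
 * u_int16_t at word index (boff & ~1), in the low byte when boff is
 * even and the high byte when it is odd.
 */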

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
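
/*
 * In the gap16 layout, logical data bytes 0-15 occupy buffer offsets
 * 0-15, bytes 16-31 occupy offsets 32-47, and so on: each 16-byte
 * chunk fills the first half of a 32-byte block, the second half
 * being pad.  Logical byte offset "boff" thus maps to
 *
 *	buf + ((boff << 1) & ~0x1f) + (boff & 0xf)
 *
 * which is the arithmetic the routines below use.
 */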

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int16_t t0;
				u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t *)from;
				t1 = *(u_int32_t *)(from + 2);
				t2 = *(u_int32_t *)(from + 6);
				t3 = *(u_int32_t *)(from + 10);
				t4 = *(u_int16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				u_int32_t *src = (u_int32_t *)from;
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1;
				dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff.  Source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *)(to + 0) = (u_short)t0;
				*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t *dst = (u_int32_t *)to;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1;
				dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		/* XXX Does odd-byte-aligned case ever happen? */
		default:
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}