/*	$NetBSD: if_le_ioasic.c,v 1.22 2005/02/04 02:10:48 perry Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.22 2005/02/04 02:10:48 perry Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct	am7990_softc sc_am7990;	/* glue to MI code */
	struct	lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match(struct device *, struct cfdata *, void *);
static void le_ioasic_attach(struct device *, struct device *, void *);

CFATTACH_DECL(le_ioasic, sizeof(struct le_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);

static void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *,
	    int, int);
static void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

static int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs a 128KB chunk aligned on a 128KB boundary */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
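
/*
 * With the gap16 layout used by the copy functions below (16 bytes of
 * data followed by 16 bytes of pad in every 32-byte line), this 128KB
 * area holds a 64KB buffer as seen by the LANCE.
 */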

static void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		/* don't leak the map created above */
		bus_dmamap_destroy(dmat, sc->sc_dmamap);
		goto bad;
	}
	/*
	 * Bind the 128KB buffer to the IOASIC LANCE DMA engine.
	 */
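	/*
	 * IOASIC_DMA_ADDR converts the map's bus address into the
	 * format the LANCE DMA pointer register expects; setting
	 * IOASIC_CSR_DMAEN_LANCE in the CSR then enables LANCE DMA.
	 */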
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

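	/*
	 * Point sc_r1 at an uncached, sparse-space mapping of the chip
	 * registers, and hand the MI code the uncached view of the
	 * buffer, so CPU accesses bypass the cache the DMA engine
	 * cannot see.
	 */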
	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  When an odd length forces a full
 * 16-bit write, the code doesn't worry about the extra byte it
 * touches.
 */
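
/*
 * For illustration: an even LANCE buffer offset boff lives at byte
 * address 2*boff in the DMA area (16-bit word boff), with the
 * following odd offset in that word's high byte:
 *
 *	boff:	 0  1  -  -  2  3  -  -  4  5  -  - ...
 *	addr:	 0  1  2  3  4  5  6  7  8  9 10 11 ...
 */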

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
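
/*
 * For illustration: buffer offset boff maps to byte address
 * ((boff << 1) & ~0x1f) + (boff & 0xf) in the DMA area: the 32-byte
 * line holding boff's 16-byte data chunk, plus the offset within
 * that chunk.  E.g. boff 0..15 -> addr 0..15, boff 16..31 ->
 * addr 32..47.
 */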

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
		do {
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int16_t t0;
			u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t *)from;
			t1 = *(u_int32_t *)(from + 2);
			t2 = *(u_int32_t *)(from + 6);
			t3 = *(u_int32_t *)(from + 10);
			t4 = *(u_int16_t *)(from + 14);

			/* DMA buffer is uncached on mips */
			dst[0] = t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			u_int32_t *src = (u_int32_t *)from;
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff so the source of subsequent copies is 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *)(to + 0) = (u_short)t0;
			*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *)(to + 14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t *dst = (u_int32_t *)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
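	/*
	 * The first pass may zero only part of a 16-byte chunk when
	 * boff is not chunk-aligned; each later pass zeroes a full
	 * chunk and skips the 16 pad bytes.
	 */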
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}