/*	$NetBSD: if_le_ioasic.c,v 1.27 2006/03/31 17:39:33 thorpej Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on the DEC IOASIC (I/O control ASIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.27 2006/03/31 17:39:33 thorpej Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match(struct device *, struct cfdata *, void *);
static void le_ioasic_attach(struct device *, struct device *, void *);

CFATTACH_DECL(le_ioasic, sizeof(struct le_ioasic_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);

static void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *,
	    int, int);
static void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

static int
le_ioasic_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
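
/*
 * The IOASIC's LANCE DMA pointer register supplies the base of this
 * window (see the IOASIC_LANCE_DMAPTR write in attach below), which is
 * presumably why the chunk must be naturally aligned and must not
 * cross a 128KB boundary.
 */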

static void
le_ioasic_attach(struct device *parent, struct device *self, void *aux)
{
	struct le_ioasic_softc *sc = device_private(self);
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	struct ioasic_softc *iosc = device_private(parent);
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = iosc->sc_bst;
	ioasic_bsh = iosc->sc_bsh;
	dmat = sc->sc_dmat = iosc->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

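	/*
	 * Glue for the MI am7990 code: the LANCE register window is
	 * reached through TC sparse space, the buffer through uncached
	 * space, and all buffer accesses go through the gap2/gap16
	 * copy routines below.
	 */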
	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)iosc->sc_base + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code does not worry about
 * writing one extra byte past the requested length when it is odd.
 */
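
/*
 * A sketch of the gap2 layout, as implied by the routines below:
 * LANCE byte offset boff lives in the 16-bit word at
 * buf + 2 * (boff & ~1); even offsets occupy the low byte of the
 * word, odd offsets the high byte.
 *
 *	LANCE offset:	0  1  2  3  4  5 ...
 *	host offset:	0  1  4  5  8  9 ...
 *
 * Stepping the u_int16_t pointer bptr by 2 therefore advances four
 * host bytes for every two bytes of LANCE data.
 */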

void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
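
/*
 * A sketch of the gap16 layout, as implied by the routines below:
 * LANCE byte offset boff maps to host address
 * buf + ((boff << 1) & ~0x1f) + (boff & 0xf), so each 16-byte block
 * of LANCE data fills the first half of a 32-byte host block.
 *
 *	LANCE offsets:	 0..15	16..31	32..47
 *	host offsets:	 0..15	32..47	64..79
 */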

void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff,
    int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
		do {
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int16_t t0;
			u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t *)from;
			t1 = *(u_int32_t *)(from + 2);
			t2 = *(u_int32_t *)(from + 6);
			t3 = *(u_int32_t *)(from + 10);
			t4 = *(u_int16_t *)(from + 14);

			/* DMA buffer is uncached on mips */
			dst[0] = t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			u_int32_t *src = (u_int32_t *)from;
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff,
    int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff; the source of subsequent copies is then
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *)(to + 0) = (u_short)t0;
			*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *)(to + 14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t *dst = (u_int32_t *)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

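/*
 * Zero len bytes of gap16 buffer space starting at LANCE offset boff:
 * at most 16 data bytes are cleared in each 32-byte host block, and
 * the first block may begin part-way in.
 */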
void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}