/*	$NetBSD: isadma_machdep.c,v 1.6 2003/07/15 03:36:01 lukem Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.6 2003/07/15 03:36:01 lukem Exp $");

#define ISA_DMA_STATS

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
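
/*
 * Illustrative note (an assumption based on <machine/bus.h>, not code
 * in this file): with _ARM32_BUS_DMA_PRIVATE defined, the range
 * descriptor referenced below looks roughly like
 *
 *	struct arm32_dma_range {
 *		bus_addr_t	dr_sysbase;	(system base address)
 *		bus_addr_t	dr_busbase;	(bus base address)
 *		bus_size_t	dr_len;		(length of the range)
 *	};
 *
 * The Shark platform code fills in one such entry per DMA-safe window
 * and exports the array via the variables below.
 */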
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
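
/*
 * Illustrative sketch (not part of the original source): a driver on
 * the ISA bus consumes this tag through the MI bus_dma(9) interface
 * rather than calling the _isa_* entry points directly.  Assuming a
 * hypothetical softc field "sc->sc_dmat" holding &isa_bus_dma_tag, a
 * memory-to-device transfer looks roughly like:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map))
 *		return;				(map creation failed)
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);	(copies into the bounce buf)
 *		... program the DMA controller, wait for completion ...
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(sc->sc_dmat, map);
 *	}
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */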

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
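
	/*
	 * Worked example (illustrative, not from the original source):
	 * with PAGE_SIZE = 4K, a maxxfer of 64K can span
	 * (65536 / 4096) + 1 = 17 pages when the buffer is not
	 * page-aligned, so a caller limited to fewer segments (the ISA
	 * DMA controller, for instance, handles only one contiguous
	 * region) forces a bounce even for memory below 16M.
	 */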
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
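
/*
 * Illustrative note (hypothetical caller, not in this file): passing
 * BUS_DMA_ALLOCNOW at create time makes the function above reserve the
 * bounce pages for the map's lifetime, which matters for drivers that
 * must not fail at load time, e.g.:
 *
 *	error = bus_dmamap_create(sc->sc_dmat, size, nseg, maxsegsz, 0,
 *	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->sc_dmam);
 *
 * Without it, bounce pages are allocated lazily on the first load that
 * needs them and freed again at unload time.
 */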

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
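
/*
 * Illustrative note (an assumption about typical usage, not from the
 * original source): given the copies above, a memory-to-device
 * transfer pairs PREWRITE with POSTWRITE and a device-to-memory
 * transfer pairs PREREAD with POSTREAD; only PREWRITE and POSTREAD
 * actually move data when bouncing.  For a device-to-memory transfer:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device DMAs into the bounce pages ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	(POSTREAD copies from the bounce buffer to the caller's buffer)
 */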

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}