/*	$NetBSD: isadma_machdep.c,v 1.3 2002/08/17 20:46:26 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

struct arm32_dma_range *footbridge_isa_dma_ranges;
int footbridge_isa_dma_nranges;
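
/*
 * For illustration only (not part of the original file): a minimal
 * sketch of the bounce decision described above, assuming the
 * arm32_dma_range members dr_sysbase and dr_len from <machine/bus.h>.
 * With no range list, anything extending past the 16M threshold must
 * be bounced; with a range list, anything outside every range must be.
 */
#if 0
static int
example_addr_needs_bounce(bus_addr_t pa, bus_size_t len)
{
	struct arm32_dma_range *dr;
	int i;

	if (footbridge_isa_dma_ranges == NULL)
		return ((pa + len) > ISA_DMA_BOUNCE_THRESHOLD);

	for (i = 0; i < footbridge_isa_dma_nranges; i++) {
		dr = &footbridge_isa_dma_ranges[i];
		if (pa >= dr->dr_sysbase &&
		    (pa + len) <= (dr->dr_sysbase + dr->dr_len))
			return (0);	/* entirely inside a safe range */
	}
	return (1);			/* outside all ranges; bounce */
}
#endif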

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
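
/*
 * For illustration only (not part of the original file): a hypothetical
 * driver's use of this tag through the standard bus_dma(9) interface.
 * The single-segment map, the 64K boundary and the error handling are
 * illustrative assumptions, not requirements imposed by this file.
 */
#if 0
static int
example_isa_dma_write(bus_dma_tag_t t, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* 8237-style ISA DMA wants one segment inside a 64K boundary. */
	error = bus_dmamap_create(t, len, 1, len, 0x10000,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &map);
	if (error)
		return (error);

	/* May transparently substitute a bounce buffer (see below). */
	error = bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(t, map);
		return (error);
	}

	/* PREWRITE copies buf into the bounce pages when bouncing. */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program the controller with map->dm_segs[0] and wait ... */

	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return (0);
}
#endif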

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = footbridge_isa_dma_ranges;
	isa_bus_dma_tag._nranges = footbridge_isa_dma_nranges;
}
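
/*
 * For illustration only (not part of the original file): a hypothetical
 * sketch of how platform startup code might publish its DMA-safe
 * range(s) before isa_dma_init() runs.  The single 16M range at
 * physical address 0, the field order shown, and all names prefixed
 * "example_" are assumptions.
 */
#if 0
static struct arm32_dma_range example_isa_ranges[] = {
	/* dr_sysbase, dr_busbase, dr_len */
	{ 0x00000000, 0x00000000, ISA_DMA_BOUNCE_THRESHOLD },
};

static void
example_platform_dma_setup(void)
{

	footbridge_isa_dma_ranges = example_isa_ranges;
	footbridge_isa_dma_nranges = 1;
	isa_dma_init();
}
#endif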

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
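
/*
 * For illustration only (not part of the original file): the sync
 * discipline the function above enforces.  PRE ops are issued before
 * the transfer starts, POST ops after it completes, and the two are
 * never mixed in one call.  "t", "map" and "len" are placeholders.
 */
#if 0
	/* Device reads from memory (host write): */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... run the transfer; PREWRITE filled the bounce pages ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);

	/* Device writes to memory (host read): */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... run the transfer ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
	/* POSTREAD copied the bounce pages back to the caller's buffer. */
#endif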

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}