/*	$NetBSD: isadma_machdep.c,v 1.2 2002/07/31 17:34:26 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define	ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;
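
/*
 * For reference, each platform-supplied descriptor is a struct
 * arm32_dma_range from <machine/bus.h>; roughly (a sketch, not a
 * verbatim copy of the header):
 *
 *	struct arm32_dma_range {
 *		bus_addr_t	dr_sysbase;	-- CPU physical base
 *		bus_addr_t	dr_busbase;	-- same memory as seen by the bus
 *		bus_size_t	dr_len;		-- length of the range
 *	};
 */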

int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
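
/*
 * Example usage (a sketch only; "sc", its members, and the sizes are
 * hypothetical): a driver simply hands this tag to the standard
 * bus_dma(9) entry points, and any bouncing done below is invisible
 * to it:
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmam) != 0)
 *		goto fail;
 *	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmam, buf, len,
 *	    NULL, BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmam, 0, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	(start the device's DMA engine here)
 */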

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);
	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
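		 * (BUS_DMA_ALLOCNOW asks that all resources a load
		 * might need be reserved at create time, so that a
		 * later bus_dmamap_load() cannot fail for lack of
		 * bounce pages.)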
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on an error condition we return "no valid
	 * mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

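	/* The map no longer references a caller's buffer of any kind. */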
	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
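
/*
 * For a device read (device -> memory) the usual calling pattern is
 * (a sketch; "map" and "len" are whatever the driver used at load
 * time):
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	(run the DMA transfer)
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * so the POSTREAD case above is where bounced data finally lands in
 * the caller's buffer; likewise PREWRITE is where outgoing data is
 * copied into the bounce pages before a device write.
 */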

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

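	/*
	 * No DMA ranges means isa_dma_init() has not supplied a
	 * DMA-safe region to allocate from; fail rather than hand
	 * back memory the ISA bus may not be able to address.
	 */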
	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}