/*	$NetBSD: isadma_machdep.c,v 1.12 2009/03/14 14:46:06 dsl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.12 2009/03/14 14:46:06 dsl Exp $");

#define ISA_DMA_STATS

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
extern struct arm32_dma_range *shark_isa_dma_ranges;
extern int shark_isa_dma_nranges;
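
/*
 * Purely illustrative sketch (addresses are made up): the Shark platform
 * code is expected to publish these tables before isa_dma_init() runs,
 * roughly along the lines of
 *
 *	struct arm32_dma_range shark_isa_dma_ranges[] = {
 *		{ 0x08000000, 0x00000000, 0x00400000 },
 *	};	(system base, bus base, 4MB DMA-safe window)
 *	int shark_isa_dma_nranges = 1;
 *
 * The real definitions live in the Shark platform code, not here.
 */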

int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,				/* _ranges */
	0,				/* _nranges */
	NULL,				/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* pre */
	_isa_bus_dmamap_sync,		/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
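
/*
 * A minimal usage sketch (not from this file): an ISA driver consumes
 * this tag through the MI bus_dma(9) interface, and the bouncing done
 * below stays invisible to it:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map) == 0 &&
 *	    bus_dmamap_load(&isa_bus_dma_tag, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... start the transfer, then sync POSTWRITE and unload ...
 *	}
 */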

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init()
{

	isa_bus_dma_tag._ranges = shark_isa_dma_ranges;
	isa_bus_dma_tag._nranges = shark_isa_dma_nranges;
}

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif
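
/*
 * Note: with ISA_DMA_STATS defined, the counters above are plain global
 * u_longs and can be inspected from a kernel debugger such as ddb(4).
 */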

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
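
	/*
	 * The cookie is allocated with room behind it for the
	 * bus_dma_segment_t array (cookie->id_bouncesegs) used to
	 * describe the bounce buffer, should one be needed.
	 */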

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

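	/*
	 * Note: on this platform _isa_bus_dmamap_create() always
	 * allocates a cookie, so it is safe to dereference it here
	 * unconditionally.
	 */
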
	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
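			/*
			 * (The chain is walked by hand below rather than
			 * with m_copyback(9), which may attempt to
			 * allocate mbufs.)
			 */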
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
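
/*
 * Usage note (a sketch, not from this file): a bounced device-write is
 * preceded by bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE) and a bounced
 * device-read is followed by bus_dmamap_sync(..., BUS_DMASYNC_POSTREAD);
 * those are exactly the two directions that trigger the copies above.
 */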

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

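	/* No DMA ranges registered yet (see isa_dma_init()). */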
	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}