/*	$NetBSD: isadma_machdep.c,v 1.14 2011/07/01 19:32:28 dyoung Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.14 2011/07/01 19:32:28 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>
/*
 * ISA has a 24-bit address limitation, so at most it has a 16M
 * DMA range.  However, some platforms have a more limited range,
 * e.g. the Shark NC.  On these systems, we are provided with
 * a set of DMA ranges.  The pmap module is aware of these ranges
 * and places DMA-safe memory for them onto an alternate free list
 * so that they are protected from being used to service page faults,
 * etc. (unless we've run out of memory elsewhere).
 */
#define ISA_DMA_BOUNCE_THRESHOLD	(16 * 1024 * 1024)

struct arm32_dma_range *footbridge_isa_dma_ranges;
int footbridge_isa_dma_nranges;

int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct arm32_bus_dma_tag isa_bus_dma_tag = {
	0,			/* _ranges; filled in by isa_dma_init() */
	0,			/* _nranges; filled in by isa_dma_init() */
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,	/* pre */
	_isa_bus_dmamap_sync,	/* post */
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

/*
 * Initialize ISA DMA.
 */
void
isa_dma_init(void)
{

	isa_bus_dma_tag._ranges = footbridge_isa_dma_ranges;
	isa_bus_dma_tag._nranges = footbridge_isa_dma_nranges;
}
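
/*
 * Illustrative sketch (not part of the original file): platform startup
 * code is expected to describe its ISA DMA window(s) in
 * footbridge_isa_dma_ranges before calling isa_dma_init().  The names
 * and range values below are hypothetical; a real port would use the
 * addresses of its own DMA-safe region.
 */
#if 0
static struct arm32_dma_range example_isa_dma_range[] = {
	/* dr_sysbase, dr_busbase, dr_len (CPU view, bus view, length) */
	{ 0x00000000, 0x00000000, 16 * 1024 * 1024 },
};

static void
example_platform_startup(void)
{

	footbridge_isa_dma_ranges = example_isa_dma_range;
	footbridge_isa_dma_nranges = 1;
	isa_dma_init();
}
#endif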

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct arm32_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this simply looks like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 *
	 * Well, not really... see note above regarding DMA ranges.
	 * Because of the range issue on this platform, we just
	 * always "might bounce".
	 */
	cookieflags = ID_MIGHT_NEED_BOUNCE;
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct arm32_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
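
/*
 * Illustrative sketch (not part of the original file): from a driver's
 * point of view the bouncing set up above is invisible; the standard
 * bus_dma(9) create/load/sync/unload sequence is all that is required.
 * "example_isa_read" and its arguments are hypothetical.
 */
#if 0
static int
example_isa_read(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t dmam;
	int error;

	error = bus_dmamap_create(dmat, len, 1, len, 0,
	    BUS_DMA_NOWAIT, &dmam);
	if (error)
		return (error);
	error = bus_dmamap_load(dmat, dmam, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(dmat, dmam);
		return (error);
	}

	/* Before the device writes memory; nothing is copied for a read. */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_PREREAD);

	/* ... program the DMA controller and wait for completion ... */

	/* After completion; if we bounced, this copies the data back. */
	bus_dmamap_sync(dmat, dmam, 0, len, BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(dmat, dmam);
	bus_dmamap_destroy(dmat, dmam);
	return (0);
}
#endif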

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
319
320 /*
321 * Like _isa_bus_dmamap_load(), but for mbufs.
322 */
323 int
324 _isa_bus_dmamap_load_mbuf(t, map, m0, flags)
325 bus_dma_tag_t t;
326 bus_dmamap_t map;
327 struct mbuf *m0;
328 int flags;
329 {
330 struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
331 int error;
332
333 /*
334 * Make sure that on error condition we return "no valid mappings."
335 */
336 map->dm_mapsize = 0;
337 map->dm_nsegs = 0;
338
339 #ifdef DIAGNOSTIC
340 if ((m0->m_flags & M_PKTHDR) == 0)
341 panic("_isa_bus_dmamap_load_mbuf: no packet header");
342 #endif
343
344 if (m0->m_pkthdr.len > map->_dm_size)
345 return (EINVAL);
346
347 /*
348 * Try to load the map the normal way. If this errors out,
349 * and we can bounce, we will.
350 */
351 error = _bus_dmamap_load_mbuf(t, map, m0, flags);
352 if (error == 0 ||
353 (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
354 return (error);
355
356 /*
357 * First attempt failed; bounce it.
358 */
359
360 STAT_INCR(isa_dma_stats_bounces);
361
362 /*
363 * Allocate bounce pages, if necessary.
364 */
365 if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
366 error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
367 flags);
368 if (error)
369 return (error);
370 }
371
372 /*
373 * Cache a pointer to the caller's buffer and load the DMA map
374 * with the bounce buffer.
375 */
376 cookie->id_origbuf = m0;
377 cookie->id_origbuflen = m0->m_pkthdr.len; /* not really used */
378 cookie->id_buftype = ID_BUFTYPE_MBUF;
379 error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
380 m0->m_pkthdr.len, NULL, flags);
381 if (error) {
382 /*
383 * Free the bounce pages, unless our resources
384 * are reserved for our exclusive use.
385 */
386 if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
387 _isa_dma_free_bouncebuf(t, map);
388 return (error);
389 }
390
391 /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
392 cookie->id_flags |= ID_IS_BOUNCING;
393 return (0);
394 }

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}
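
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants ISA DMA-safe memory up front would allocate and map it through
 * this tag with the standard bus_dmamem_* interface; the range check
 * above ensures the pages come from the platform's DMA window.  Error
 * handling is abbreviated and the variable names are hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	int rsegs, error;
	void *kva;

	error = bus_dmamem_alloc(&isa_bus_dma_tag, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(&isa_bus_dma_tag, &seg, rsegs,
		    PAGE_SIZE, &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
#endif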

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}