/*	$NetBSD: isa_dma.c,v 1.4.26.1 2007/03/12 05:47:22 rmind Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isa_dma.c,v 1.4.26.1 2007/03/12 05:47:22 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _ATARI_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;

/*
 * Cookie used by ISA DMA.  A pointer to one of these is stashed in
 * the DMA map.
 */
struct atari_isa_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that id_origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments */
};

/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
int	_isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));
void	_isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
int	_isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	_isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	_isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	_isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
void	_isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
void	_isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int));

int	_isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int));

int	_isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int));
void	_isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct atari_bus_dma_tag isa_bus_dma_tag = {
	ISA_DMA_BOUNCE_THRESHOLD,
	0,
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
};
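
/*
 * Example usage (a minimal sketch, not part of this file): a driver
 * attached below isa normally reaches these entry points through the
 * generic bus_dma(9) wrappers, using the DMA tag it was handed at
 * attach time.  The names "sc->sc_dmat", "xfer_buf" and XFER_SIZE
 * below are illustrative only:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, XFER_SIZE, 1, XFER_SIZE, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &map) != 0)
 *		return;
 *	if (bus_dmamap_load(sc->sc_dmat, map, xfer_buf, XFER_SIZE,
 *	    NULL, BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamap_destroy(sc->sc_dmat, map);
 *		return;
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, XFER_SIZE,
 *	    BUS_DMASYNC_PREWRITE);
 *	(start the transfer using map->dm_segs[], wait for completion)
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, XFER_SIZE,
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 *
 * Whether the transfer was bounced is invisible to the driver; the
 * _isa_* wrappers below handle that transparently.
 */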

/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct atari_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct atari_isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that with the ISABUS_DMA_32BIT
	 * flag checked here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	if (avail_end <= t->_bounce_thresh ||
	    (flags & ISABUS_DMA_32BIT) != 0) {
		/* Bouncing not necessary due to memory size. */
		map->_dm_bounce_thresh = 0;
	}
	cookieflags = 0;
	if (map->_dm_bounce_thresh != 0 ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct atari_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_isa_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	STAT_INCR(isa_dma_stats_bounces);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			_isa_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just return; nothing to do.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
		return;

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, char *) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("_isa_bus_dmamap_sync");
	}
}
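
/*
 * Note on the sync discipline enforced above (a sketch; names are
 * illustrative): PRE operations must be issued before the hardware is
 * started and POST operations after it has finished, in separate
 * calls.  For a device-to-memory transfer that might be bounced:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	(program the controller and wait for the DMA to complete)
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * When the map is bouncing, the POSTREAD call is what copies the data
 * from the bounce buffer back into the caller's original buffer.
 */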

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}
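
/*
 * A driver can avoid bouncing entirely by allocating its DMA memory
 * below the threshold up front.  A minimal sketch (assuming the ISA
 * DMA tag routes bus_dmamem_alloc() to the function above; "sc" and
 * "size" are illustrative only):
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return;
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return;
 *	}
 *	(create a map and bus_dmamap_load() kva as usual)
 */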

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

int
_isa_dma_alloc_bouncebuf(t, map, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_size_t size;
	int flags;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct atari_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}