/*	$NetBSD: isa_dma.c,v 1.6.42.1 2008/05/18 12:31:43 yamt Exp $	*/
2
3 #define ISA_DMA_STATS
4
5 /*-
6 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
11 * Simulation Facility, NASA Ames Research Center.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isa_dma.c,v 1.6.42.1 2008/05/18 12:31:43 yamt Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/syslog.h>
42 #include <sys/device.h>
43 #include <sys/malloc.h>
44 #include <sys/proc.h>
45 #include <sys/mbuf.h>
46
47 #define _ATARI_BUS_DMA_PRIVATE
48 #include <machine/bus.h>
49
50 #include <dev/isa/isareg.h>
51 #include <dev/isa/isavar.h>
52
53 #include <uvm/uvm_extern.h>
54
55 extern paddr_t avail_end;
56
57 /*
58 * Cookie used by ISA dma. A pointer to one of these it stashed in
59 * the DMA map.
60 */
61 struct atari_isa_dma_cookie {
62 int id_flags; /* flags; see below */
63
64 /*
65 * Information about the original buffer used during
66 * DMA map syncs. Note that origibuflen is only used
67 * for ID_BUFTYPE_LINEAR.
68 */
69 void *id_origbuf; /* pointer to orig buffer if
70 bouncing */
71 bus_size_t id_origbuflen; /* ...and size */
72 int id_buftype; /* type of buffer */
73
74 void *id_bouncebuf; /* pointer to the bounce buffer */
75 bus_size_t id_bouncebuflen; /* ...and size */
76 int id_nbouncesegs; /* number of valid bounce segs */
77 bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
78 physical memory segments */
79 };
80
81 /* id_flags */
82 #define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */
83 #define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */
84 #define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */
85
86 /* id_buftype */
87 #define ID_BUFTYPE_INVALID 0
88 #define ID_BUFTYPE_LINEAR 1
89 #define ID_BUFTYPE_MBUF 2
90 #define ID_BUFTYPE_UIO 3
91 #define ID_BUFTYPE_RAW 4
92
93 int _isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
94 bus_size_t, bus_size_t, int, bus_dmamap_t *));
95 void _isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
96 int _isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
97 bus_size_t, struct proc *, int));
98 int _isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
99 struct mbuf *, int));
100 int _isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
101 struct uio *, int));
102 int _isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
103 bus_dma_segment_t *, int, bus_size_t, int));
104 void _isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
105 void _isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
106 bus_addr_t, bus_size_t, int));
107
108 int _isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t,
109 bus_size_t, bus_dma_segment_t *, int, int *, int));
110
111 int _isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t,
112 bus_size_t, int));
113 void _isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t));
114
115 /*
116 * Entry points for ISA DMA. These are mostly wrappers around
117 * the generic functions that understand how to deal with bounce
118 * buffers, if necessary.
119 */
120 struct atari_bus_dma_tag isa_bus_dma_tag = {
121 ISA_DMA_BOUNCE_THRESHOLD,
122 0,
123 _isa_bus_dmamap_create,
124 _isa_bus_dmamap_destroy,
125 _isa_bus_dmamap_load,
126 _isa_bus_dmamap_load_mbuf,
127 _isa_bus_dmamap_load_uio,
128 _isa_bus_dmamap_load_raw,
129 _isa_bus_dmamap_unload,
130 _isa_bus_dmamap_sync,
131 };
132
133 /**********************************************************************
134 * bus.h dma interface entry points
135 **********************************************************************/
136
137 #ifdef ISA_DMA_STATS
138 #define STAT_INCR(v) (v)++
139 #define STAT_DECR(v) do { \
140 if ((v) == 0) \
141 printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
142 else \
143 (v)--; \
144 } while (0)
145 u_long isa_dma_stats_loads;
146 u_long isa_dma_stats_bounces;
147 u_long isa_dma_stats_nbouncebufs;
148 #else
149 #define STAT_INCR(v)
150 #define STAT_DECR(v)
151 #endif
152
153 /*
154 * Create an ISA DMA map.
155 */
156 int
157 _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
158 bus_dma_tag_t t;
159 bus_size_t size;
160 int nsegments;
161 bus_size_t maxsegsz;
162 bus_size_t boundary;
163 int flags;
164 bus_dmamap_t *dmamp;
165 {
166 struct atari_isa_dma_cookie *cookie;
167 bus_dmamap_t map;
168 int error, cookieflags;
169 void *cookiestore;
170 size_t cookiesize;
171
172 /* Call common function to create the basic map. */
173 error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
174 flags, dmamp);
175 if (error)
176 return (error);
177
178 map = *dmamp;
179 map->_dm_cookie = NULL;
180
181 cookiesize = sizeof(struct atari_isa_dma_cookie);
182
183 /*
184 * ISA only has 24-bits of address space. This means
185 * we can't DMA to pages over 16M. In order to DMA to
186 * arbitrary buffers, we use "bounce buffers" - pages
187 * in memory below the 16M boundary. On DMA reads,
188 * DMA happens to the bounce buffers, and is copied into
189 * the caller's buffer. On writes, data is copied into
190 * but bounce buffer, and the DMA happens from those
191 * pages. To software using the DMA mapping interface,
192 * this looks simply like a data cache.
193 *
194 * If we have more than 16M of RAM in the system, we may
195 * need bounce buffers. We check and remember that here.
196 *
197 * There are exceptions, however. VLB devices can do
198 * 32-bit DMA, and indicate that here.
199 *
200 * ...or, there is an opposite case. The most segments
201 * a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
202 * the caller can't handle that many segments (e.g. the
203 * ISA DMA controller), we may have to bounce it as well.
204 */
205 if (avail_end <= t->_bounce_thresh ||
206 (flags & ISABUS_DMA_32BIT) != 0) {
207 /* Bouncing not necessary due to memory size. */
208 map->_dm_bounce_thresh = 0;
209 }
210 cookieflags = 0;
211 if (map->_dm_bounce_thresh != 0 ||
212 ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
213 cookieflags |= ID_MIGHT_NEED_BOUNCE;
214 cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
215 }
216
217 /*
218 * Allocate our cookie.
219 */
220 if ((cookiestore = malloc(cookiesize, M_DMAMAP,
221 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
222 error = ENOMEM;
223 goto out;
224 }
225 memset(cookiestore, 0, cookiesize);
226 cookie = (struct atari_isa_dma_cookie *)cookiestore;
227 cookie->id_flags = cookieflags;
228 map->_dm_cookie = cookie;
229
230 if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
231 /*
232 * Allocate the bounce pages now if the caller
233 * wishes us to do so.
234 */
235 if ((flags & BUS_DMA_ALLOCNOW) == 0)
236 goto out;
237
238 error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
239 }
240
241 out:
242 if (error) {
243 if (map->_dm_cookie != NULL)
244 free(map->_dm_cookie, M_DMAMAP);
245 _bus_dmamap_destroy(t, map);
246 }
247 return (error);
248 }
249
250 /*
251 * Destroy an ISA DMA map.
252 */
253 void
254 _isa_bus_dmamap_destroy(t, map)
255 bus_dma_tag_t t;
256 bus_dmamap_t map;
257 {
258 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
259
260 /*
261 * Free any bounce pages this map might hold.
262 */
263 if (cookie->id_flags & ID_HAS_BOUNCE)
264 _isa_dma_free_bouncebuf(t, map);
265
266 free(cookie, M_DMAMAP);
267 _bus_dmamap_destroy(t, map);
268 }
269
270 /*
271 * Load an ISA DMA map with a linear buffer.
272 */
273 int
274 _isa_bus_dmamap_load(t, map, buf, buflen, p, flags)
275 bus_dma_tag_t t;
276 bus_dmamap_t map;
277 void *buf;
278 bus_size_t buflen;
279 struct proc *p;
280 int flags;
281 {
282 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
283 int error;
284
285 STAT_INCR(isa_dma_stats_loads);
286
287 /*
288 * Make sure that on error condition we return "no valid mappings."
289 */
290 map->dm_mapsize = 0;
291 map->dm_nsegs = 0;
292
293 /*
294 * Try to load the map the normal way. If this errors out,
295 * and we can bounce, we will.
296 */
297 error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
298 if (error == 0 ||
299 (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
300 return (error);
301
302 /*
303 * First attempt failed; bounce it.
304 */
305
306 STAT_INCR(isa_dma_stats_bounces);
307
308 /*
309 * Allocate bounce pages, if necessary.
310 */
311 if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
312 error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags);
313 if (error)
314 return (error);
315 }
316
317 /*
318 * Cache a pointer to the caller's buffer and load the DMA map
319 * with the bounce buffer.
320 */
321 cookie->id_origbuf = buf;
322 cookie->id_origbuflen = buflen;
323 cookie->id_buftype = ID_BUFTYPE_LINEAR;
324 error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
325 p, flags);
326 if (error) {
327 /*
328 * Free the bounce pages, unless our resources
329 * are reserved for our exclusive use.
330 */
331 if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
332 _isa_dma_free_bouncebuf(t, map);
333 return (error);
334 }
335
336 /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
337 cookie->id_flags |= ID_IS_BOUNCING;
338 return (0);
339 }
340
341 /*
342 * Like _isa_bus_dmamap_load(), but for mbufs.
343 */
344 int
345 _isa_bus_dmamap_load_mbuf(t, map, m0, flags)
346 bus_dma_tag_t t;
347 bus_dmamap_t map;
348 struct mbuf *m0;
349 int flags;
350 {
351 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
352 int error;
353
354 /*
355 * Make sure on error condition we return "no valid mappings."
356 */
357 map->dm_mapsize = 0;
358 map->dm_nsegs = 0;
359
360 #ifdef DIAGNOSTIC
361 if ((m0->m_flags & M_PKTHDR) == 0)
362 panic("_isa_bus_dmamap_load_mbuf: no packet header");
363 #endif
364
365 if (m0->m_pkthdr.len > map->_dm_size)
366 return (EINVAL);
367
368 /*
369 * Try to load the map the normal way. If this errors out,
370 * and we can bounce, we will.
371 */
372 error = _bus_dmamap_load_mbuf(t, map, m0, flags);
373 if (error == 0 ||
374 (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
375 return (error);
376
377 /*
378 * First attempt failed; bounce it.
379 */
380
381 STAT_INCR(isa_dma_stats_bounces);
382
383 /*
384 * Allocate bounce pages, if necessary.
385 */
386 if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
387 error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
388 flags);
389 if (error)
390 return (error);
391 }
392
393 /*
394 * Cache a pointer to the caller's buffer and load the DMA map
395 * with the bounce buffer.
396 */
397 cookie->id_origbuf = m0;
398 cookie->id_origbuflen = m0->m_pkthdr.len; /* not really used */
399 cookie->id_buftype = ID_BUFTYPE_MBUF;
400 error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
401 m0->m_pkthdr.len, NULL, flags);
402 if (error) {
403 /*
404 * Free the bounce pages, unless our resources
405 * are reserved for our exclusive use.
406 */
407 if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
408 _isa_dma_free_bouncebuf(t, map);
409 return (error);
410 }
411
412 /* ...so _isa_bus_dmamap_sync() knows we're bouncing */
413 cookie->id_flags |= ID_IS_BOUNCING;
414 return (0);
415 }
416
417 /*
418 * Like _isa_bus_dmamap_load(), but for uios.
419 */
420 int
421 _isa_bus_dmamap_load_uio(t, map, uio, flags)
422 bus_dma_tag_t t;
423 bus_dmamap_t map;
424 struct uio *uio;
425 int flags;
426 {
427
428 panic("_isa_bus_dmamap_load_uio: not implemented");
429 }
430
431 /*
432 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
433 * bus_dmamem_alloc().
434 */
435 int
436 _isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
437 bus_dma_tag_t t;
438 bus_dmamap_t map;
439 bus_dma_segment_t *segs;
440 int nsegs;
441 bus_size_t size;
442 int flags;
443 {
444
445 panic("_isa_bus_dmamap_load_raw: not implemented");
446 }
447
448 /*
449 * Unload an ISA DMA map.
450 */
451 void
452 _isa_bus_dmamap_unload(t, map)
453 bus_dma_tag_t t;
454 bus_dmamap_t map;
455 {
456 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
457
458 /*
459 * If we have bounce pages, free them, unless they're
460 * reserved for our exclusive use.
461 */
462 if ((cookie->id_flags & ID_HAS_BOUNCE) &&
463 (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
464 _isa_dma_free_bouncebuf(t, map);
465
466 cookie->id_flags &= ~ID_IS_BOUNCING;
467 cookie->id_buftype = ID_BUFTYPE_INVALID;
468
469 /*
470 * Do the generic bits of the unload.
471 */
472 _bus_dmamap_unload(t, map);
473 }
474
475 /*
476 * Synchronize an ISA DMA map.
477 */
478 void
479 _isa_bus_dmamap_sync(t, map, offset, len, ops)
480 bus_dma_tag_t t;
481 bus_dmamap_t map;
482 bus_addr_t offset;
483 bus_size_t len;
484 int ops;
485 {
486 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
487
488 /*
489 * Mixing PRE and POST operations is not allowed.
490 */
491 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
492 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
493 panic("_isa_bus_dmamap_sync: mix PRE and POST");
494
495 #ifdef DIAGNOSTIC
496 if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
497 if (offset >= map->dm_mapsize)
498 panic("_isa_bus_dmamap_sync: bad offset");
499 if (len == 0 || (offset + len) > map->dm_mapsize)
500 panic("_isa_bus_dmamap_sync: bad length");
501 }
502 #endif
503
504 /*
505 * If we're not bouncing, just return; nothing to do.
506 */
507 if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
508 return;
509
510 switch (cookie->id_buftype) {
511 case ID_BUFTYPE_LINEAR:
512 /*
513 * Nothing to do for pre-read.
514 */
515
516 if (ops & BUS_DMASYNC_PREWRITE) {
517 /*
518 * Copy the caller's buffer to the bounce buffer.
519 */
520 memcpy((char *)cookie->id_bouncebuf + offset,
521 (char *)cookie->id_origbuf + offset, len);
522 }
523
524 if (ops & BUS_DMASYNC_POSTREAD) {
525 /*
526 * Copy the bounce buffer to the caller's buffer.
527 */
528 memcpy((char *)cookie->id_origbuf + offset,
529 (char *)cookie->id_bouncebuf + offset, len);
530 }
531
532 /*
533 * Nothing to do for post-write.
534 */
535 break;
536
537 case ID_BUFTYPE_MBUF:
538 {
539 struct mbuf *m, *m0 = cookie->id_origbuf;
540 bus_size_t minlen, moff;
541
542 /*
543 * Nothing to do for pre-read.
544 */
545
546 if (ops & BUS_DMASYNC_PREWRITE) {
547 /*
548 * Copy the caller's buffer to the bounce buffer.
549 */
550 m_copydata(m0, offset, len,
551 (char *)cookie->id_bouncebuf + offset);
552 }
553
554 if (ops & BUS_DMASYNC_POSTREAD) {
555 /*
556 * Copy the bounce buffer to the caller's buffer.
557 */
558 for (moff = offset, m = m0; m != NULL && len != 0;
559 m = m->m_next) {
560 /* Find the beginning mbuf. */
561 if (moff >= m->m_len) {
562 moff -= m->m_len;
563 continue;
564 }
565
566 /*
567 * Now at the first mbuf to sync; nail
568 * each one until we have exhausted the
569 * length.
570 */
571 minlen = len < m->m_len - moff ?
572 len : m->m_len - moff;
573
574 memcpy(mtod(m, char *) + moff,
575 (char *)cookie->id_bouncebuf + offset,
576 minlen);
577
578 moff = 0;
579 len -= minlen;
580 offset += minlen;
581 }
582 }
583
584 /*
585 * Nothing to do for post-write.
586 */
587 break;
588 }
589
590 case ID_BUFTYPE_UIO:
591 panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO");
592 break;
593
594 case ID_BUFTYPE_RAW:
595 panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW");
596 break;
597
598 case ID_BUFTYPE_INVALID:
599 panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID");
600 break;
601
602 default:
603 printf("unknown buffer type %d\n", cookie->id_buftype);
604 panic("_isa_bus_dmamap_sync");
605 }
606 }
607
608 /*
609 * Allocate memory safe for ISA DMA.
610 */
611 int
612 _isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
613 bus_dma_tag_t t;
614 bus_size_t size, alignment, boundary;
615 bus_dma_segment_t *segs;
616 int nsegs;
617 int *rsegs;
618 int flags;
619 {
620 paddr_t high;
621
622 if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
623 high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
624 else
625 high = trunc_page(avail_end);
626
627 return (bus_dmamem_alloc_range(t, size, alignment, boundary,
628 segs, nsegs, rsegs, flags, 0, high));
629 }
630
631 /**********************************************************************
632 * ISA DMA utility functions
633 **********************************************************************/
634
635 int
636 _isa_dma_alloc_bouncebuf(t, map, size, flags)
637 bus_dma_tag_t t;
638 bus_dmamap_t map;
639 bus_size_t size;
640 int flags;
641 {
642 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
643 int error = 0;
644
645 cookie->id_bouncebuflen = round_page(size);
646 error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
647 PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
648 map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
649 if (error)
650 goto out;
651 error = bus_dmamem_map(t, cookie->id_bouncesegs,
652 cookie->id_nbouncesegs, cookie->id_bouncebuflen,
653 (void **)&cookie->id_bouncebuf, flags);
654
655 out:
656 if (error) {
657 bus_dmamem_free(t, cookie->id_bouncesegs,
658 cookie->id_nbouncesegs);
659 cookie->id_bouncebuflen = 0;
660 cookie->id_nbouncesegs = 0;
661 } else {
662 cookie->id_flags |= ID_HAS_BOUNCE;
663 STAT_INCR(isa_dma_stats_nbouncebufs);
664 }
665
666 return (error);
667 }
668
669 void
670 _isa_dma_free_bouncebuf(t, map)
671 bus_dma_tag_t t;
672 bus_dmamap_t map;
673 {
674 struct atari_isa_dma_cookie *cookie = map->_dm_cookie;
675
676 STAT_DECR(isa_dma_stats_nbouncebufs);
677
678 bus_dmamem_unmap(t, cookie->id_bouncebuf,
679 cookie->id_bouncebuflen);
680 bus_dmamem_free(t, cookie->id_bouncesegs,
681 cookie->id_nbouncesegs);
682 cookie->id_bouncebuflen = 0;
683 cookie->id_nbouncesegs = 0;
684 cookie->id_flags &= ~ID_HAS_BOUNCE;
685 }
686