/*	$NetBSD: int_bus_dma.c,v 1.13 2003/09/06 11:12:53 rearnsha Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI DMA support for the ARM Integrator.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: int_bus_dma.c,v 1.13 2003/09/06 11:12:53 rearnsha Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bootconfig.h>

#include <evbarm/integrator/int_bus_dma.h>

struct integrator_dma_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
					       physical memory segments */
};
/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0
#define	ID_BUFTYPE_LINEAR	1
#define	ID_BUFTYPE_MBUF		2
#define	ID_BUFTYPE_UIO		3
#define	ID_BUFTYPE_RAW		4
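
/*
 * Note on allocation: id_bouncesegs[] is a variable-length trailing
 * array, so the cookie is allocated with room for the map's maximum
 * segment count appended.  A minimal sketch of the sizing done in
 * integrator_bus_dmamap_create() below (illustrative only):
 *
 *	cookiesize = sizeof(struct integrator_dma_cookie) +
 *	    sizeof(bus_dma_segment_t) * map->_dm_segcnt;
 *	cookie = malloc(cookiesize, M_DMAMAP, M_NOWAIT);
 */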

#define DEBUG(x)

static struct arm32_dma_range integrator_dma_ranges[DRAM_BLOCKS];

extern BootConfig bootconfig;

static int	integrator_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
		    bus_size_t, bus_size_t, int, bus_dmamap_t *);
static void	integrator_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static int	integrator_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
		    bus_size_t, struct proc *, int);
static int	integrator_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
		    struct mbuf *, int);
static int	integrator_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
		    struct uio *, int);
static int	integrator_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
		    bus_dma_segment_t *, int, bus_size_t, int);
static void	integrator_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void	integrator_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
		    bus_addr_t, bus_size_t, int);
static int	integrator_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t,
		    bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
static int	integrator_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
		    bus_size_t, int);
static void	integrator_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);


/*
 * Create an Integrator DMA map.
 */
static int
integrator_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct integrator_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	DEBUG(printf("I_bus_dmamap_create(tag %x, size %x, nseg %d, max %x,"
	    " boundary %x, flags %x, dmamap %p)\n", (unsigned) t,
	    (unsigned) size, nsegments, (unsigned) maxsegsz,
	    (unsigned) boundary, flags, dmamp));

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct integrator_dma_cookie);

	/*
	 * Some CM boards have private memory which is significantly
	 * faster than the normal memory stick.  To support this
	 * memory we have to bounce any DMA transfers.
	 *
	 * In order to DMA to arbitrary buffers, we use "bounce
	 * buffers" - pages in the main PCI-visible memory.  On DMA
	 * reads, DMA happens to the bounce buffers, and is copied
	 * into the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those pages.
	 * To software using the DMA mapping interface, this looks
	 * simply like a data cache.
	 *
	 * If we have private RAM in the system, we may need bounce
	 * buffers.  We check and remember that here.
	 */
#if 0
	cookieflags = ID_MIGHT_NEED_BOUNCE;
#else
	cookieflags = 0;
#endif
	cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct integrator_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		DEBUG(printf("I_bus_dmamap_create bouncebuf alloc\n"));
		error = integrator_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DMAMAP);
		_bus_dmamap_destroy(t, map);
		printf("I_bus_dmamap_create failed (%d)\n", error);
	}
	return (error);
}
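
/*
 * Illustrative sketch (not part of this file): a PCI driver on the
 * Integrator reaches this routine through the generic bus_dma(9)
 * interface; "sc_dmat" below is a hypothetical softc member holding
 * the tag initialized by integrator_pci_dma_init().
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
 *	    0, BUS_DMA_NOWAIT, &map);
 *	if (error)
 *		return (error);
 */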

/*
 * Destroy an Integrator DMA map.
 */
static void
integrator_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;

	DEBUG(printf("I_bus_dmamap_destroy (tag %x, map %x)\n", (unsigned) t,
	    (unsigned) map));
	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE) {
		DEBUG(printf("I_bus_dmamap_destroy bouncebuf\n"));
		integrator_dma_free_bouncebuf(t, map);
	}

	free(cookie, M_DMAMAP);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an Integrator DMA map with a linear buffer.
 */
static int
integrator_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;
	int error;

	DEBUG(printf("I_bus_dmamap_load (tag %x, map %x, buf %p, len %u,"
	    " proc %p, flags %d)\n", (unsigned) t, (unsigned) map, buf,
	    (unsigned) buflen, p, flags));
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		DEBUG(printf("I_bus_dmamap_load alloc bouncebuf\n"));
		error = integrator_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
	    NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			integrator_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so integrator_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
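
/*
 * Illustrative sketch (not part of this file): a typical
 * memory-to-device transfer through this path, as seen from a driver.
 * The sync calls are what trigger the bounce copies in
 * integrator_bus_dmamap_sync() when ID_IS_BOUNCING is set.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the DMA (device reads from memory) ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */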

/*
 * Like integrator_bus_dmamap_load(), but for mbufs.
 */
static int
integrator_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("integrator_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 *
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = integrator_dma_alloc_bouncebuf(t, map,
		    m0->m_pkthdr.len, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			integrator_dma_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so integrator_bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	return (0);
}
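
/*
 * Illustrative sketch (not part of this file): network drivers reach
 * this routine through bus_dmamap_load_mbuf(9) when queueing a packet
 * chain for transmit, e.g.:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
 */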

/*
 * Like integrator_bus_dmamap_load(), but for uios.
 */
static int
integrator_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{

	panic("integrator_bus_dmamap_load_uio: not implemented");
}

/*
 * Like integrator_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
static int
integrator_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("integrator_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an Integrator DMA map.
 */
static void
integrator_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		integrator_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;
	cookie->id_buftype = ID_BUFTYPE_INVALID;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an Integrator DMA map.
 */
static void
integrator_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t offset, bus_size_t len, int ops)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;

	DEBUG(printf("I_bus_dmamap_sync (tag %x, map %x, offset %x, size %u,"
419 " ops %d\n", (unsigned)t, (unsigned)map, (unsigned)offset ,
	    (unsigned) len, ops));
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("integrator_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("integrator_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("integrator_bus_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing then use the standard code.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		_bus_dmamap_sync(t, map, offset, len, ops);
		return;
	}

	DEBUG(printf("dmamap_sync(");
	    if (ops & BUS_DMASYNC_PREREAD)
		printf("preread ");
	    if (ops & BUS_DMASYNC_PREWRITE)
		printf("prewrite ");
	    if (ops & BUS_DMASYNC_POSTREAD)
		printf("postread ");
	    if (ops & BUS_DMASYNC_POSTWRITE)
		printf("postwrite ");)

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
			cpu_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf +
			    offset, len);
		}
		if (ops & BUS_DMASYNC_PREREAD) {
			cpu_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf +
			    offset, len);
		}
		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
			cpu_dcache_wb_range((vaddr_t)cookie->id_bouncebuf +
			    offset, len);
		}
		if (ops & BUS_DMASYNC_PREREAD) {
			cpu_dcache_wbinv_range((vaddr_t)cookie->id_bouncebuf +
			    offset, len);
		}
		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}
		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("integrator_bus_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", cookie->id_buftype);
		panic("integrator_bus_dmamap_sync");
	}
}
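
/*
 * Illustrative sketch (not part of this file): for a device-to-memory
 * transfer the driver brackets the DMA with PREREAD/POSTREAD; the
 * POSTREAD cases above are where bounced data is copied back into the
 * caller's buffer or mbuf chain.
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device DMAs into memory ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 */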

/*
 * Allocate memory safe for Integrator DMA.
 */
static int
integrator_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{

	if (t->_ranges == NULL)
		return (ENOMEM);

	/* _bus_dmamem_alloc() does the range checks for us. */
	return (_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
	    rsegs, flags));
}

/**********************************************************************
 * Integrator DMA utility functions
 **********************************************************************/

static int
integrator_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	DEBUG(printf("Alloc bouncebuf\n"));
	cookie->id_bouncebuflen = round_page(size);
	error = integrator_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	{
		int seg;

		for (seg = 0; seg < cookie->id_nbouncesegs; seg++)
			DEBUG(printf("Seg %d @ PA 0x%08x+0x%x\n", seg,
			    (unsigned) cookie->id_bouncesegs[seg].ds_addr,
			    (unsigned) cookie->id_bouncesegs[seg].ds_len));
	}
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		DEBUG(printf("Alloc bouncebuf OK\n"));
		cookie->id_flags |= ID_HAS_BOUNCE;
	}

	return (error);
}

static void
integrator_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct integrator_dma_cookie *cookie = map->_dm_cookie;

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}

void
integrator_pci_dma_init(bus_dma_tag_t dmat)
{
	struct arm32_dma_range *dr = integrator_dma_ranges;
	int i;
	int nranges = 0;

	for (i = 0; i < bootconfig.dramblocks; i++)
		if (bootconfig.dram[i].flags & BOOT_DRAM_CAN_DMA) {
			dr[nranges].dr_sysbase = bootconfig.dram[i].address;
			dr[nranges].dr_busbase =
			    LOCAL_TO_CM_ALIAS(dr[nranges].dr_sysbase);
			dr[nranges].dr_len = bootconfig.dram[i].pages * NBPG;
			nranges++;
		}

	if (nranges == 0)
		panic("integrator_pci_dma_init: No DMA capable memory");

	dmat->_ranges = dr;
	dmat->_nranges = nranges;

	dmat->_dmamap_create = integrator_bus_dmamap_create;
	dmat->_dmamap_destroy = integrator_bus_dmamap_destroy;
	dmat->_dmamap_load = integrator_bus_dmamap_load;
	dmat->_dmamap_load_mbuf = integrator_bus_dmamap_load_mbuf;
	dmat->_dmamap_load_uio = integrator_bus_dmamap_load_uio;
	dmat->_dmamap_load_raw = integrator_bus_dmamap_load_raw;
	dmat->_dmamap_unload = integrator_bus_dmamap_unload;
	dmat->_dmamap_sync_pre = integrator_bus_dmamap_sync;
	dmat->_dmamap_sync_post = integrator_bus_dmamap_sync;

	dmat->_dmamem_alloc = integrator_bus_dmamem_alloc;
	dmat->_dmamem_free = _bus_dmamem_free;
	dmat->_dmamem_map = _bus_dmamem_map;
	dmat->_dmamem_unmap = _bus_dmamem_unmap;
	dmat->_dmamem_mmap = _bus_dmamem_mmap;
}
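
/*
 * Illustrative sketch (not part of this file): the Integrator PCI
 * bridge attachment would initialize its DMA tag with this routine
 * before handing it to child devices, e.g. (hypothetical names):
 *
 *	struct arm32_bus_dma_tag ifpci_bus_dma_tag;
 *	...
 *	integrator_pci_dma_init(&ifpci_bus_dma_tag);
 *	pba.pba_dmat = &ifpci_bus_dma_tag;
 */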