/*	$NetBSD: bus_dma.c,v 1.7 2002/01/25 19:37:49 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, vm_offset_t *, int *, int);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
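
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * driver typically creates its map once at attach time and destroys
 * it at detach time.  The softc fields "sc_dmat" and "sc_dmamap" are
 * hypothetical.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 *	if (error)
 *		return (error);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */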

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	/* The map was allocated with M_DMAMAP in _bus_dmamap_create(). */
	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vm_offset_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on an error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
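
/*
 * Example usage (sketch; the "sc" fields are hypothetical): the usual
 * lifecycle is load, PRE-sync, start the transfer, POST-sync, unload.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
 *	    sc->sc_buflen, NULL, BUS_DMA_NOWAIT);
 *	if (error)
 *		return (error);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, sc->sc_buflen,
 *	    BUS_DMASYNC_PREWRITE);
 *	... hand sc->sc_dmamap->dm_segs[] to the device ...
 */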

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vm_offset_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on an error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
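
/*
 * Example usage (sketch; names are hypothetical): a network driver
 * loads an outgoing packet chain and retries with a contiguous copy
 * when the chain needs more segments than the map provides.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... copy the chain into a single cluster mbuf
 *		    and load that instead ...
 *	}
 */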

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vm_offset_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on an error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
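
/*
 * Example usage (sketch): a character device's read/write routine can
 * map the caller's iovecs directly; for UIO_USERSPACE transfers the
 * pmap is taken from uio->uio_procp as above.
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_dmamap, uio,
 *	    BUS_DMA_NOWAIT);
 */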

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	int loop;
	bus_addr_t vaddr;
	bus_size_t length;
	bus_dma_segment_t *seg;

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Quick exit if length is zero */
		if (len == 0)
			return;

		/* Find the segment pointed to by offset */
		loop = map->dm_nsegs;
		seg = &map->dm_segs[0];
		while (offset >= seg->ds_len) {
			offset -= seg->ds_len;
			++seg;
			/* Got any more segments? */
			--loop;
			if (loop == 0)
				return;
		}

		/* Set the starting address and maximum length */
		vaddr = seg->_ds_vaddr + offset;
		length = seg->ds_len - offset;
		do {
			/* Limit the length if not the whole segment */
			if (len < length)
				length = len;
#ifdef DEBUG_DMA
			printf("syncing: %lx,%lx\n", vaddr, length);
#endif	/* DEBUG_DMA */
			/* Actually sync the cache */
			cpu_dcache_wbinv_range(vaddr, length);

			/* Adjust the length */
			len -= length;

			/* Sync complete? */
			if (len > 0) {
				/* Got any more segments? */
				--loop;
				if (loop == 0)
					return;
				++seg;
				vaddr = seg->_ds_vaddr;
				length = seg->ds_len;
			}
		} while (len > 0);

		cpu_drain_writebuf();
	}
}
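
/*
 * Example usage (sketch; "sc" and "len" are hypothetical): this
 * implementation acts only on BUS_DMASYNC_PREREAD/PREWRITE, where it
 * writes back and invalidates the affected lines and then drains the
 * write buffer; the POST ops fall through unhandled.  Portable
 * drivers nonetheless issue both:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device DMAs into the buffer ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 */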

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern vm_offset_t physical_start;
extern vm_offset_t physical_freestart;
extern vm_offset_t physical_freeend;
extern vm_offset_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif	/* DEBUG_DMA */
	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, trunc_page(physical_start),
	    trunc_page(physical_end)));
#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
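
/*
 * Example usage (sketch; sizes are illustrative): allocating one page
 * of DMA-safe memory for a descriptor ring.
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 */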

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vm_offset_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, NBPG);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				/*
				 * Clear both the cacheable (PT_C) and
				 * bufferable (PT_B) bits in the PTE.
				 */
				*ptep = ((*ptep) & ~(PT_C | PT_B));
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
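
/*
 * Example usage (sketch, continuing the allocation above):
 * BUS_DMA_COHERENT requests the uncacheable mapping set up by the
 * code above; on failure the segments go back to the system.
 *
 *	caddr_t kva;
 *
 *	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, NBPG, &kva,
 *	    BUS_DMA_COHERENT);
 *	if (error) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (error);
 *	}
 */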

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vm_offset_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_byte_to_page((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
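
/*
 * Example usage (sketch; the "foo" driver is hypothetical): a device's
 * mmap entry point usually passes its segments straight through.
 *
 *	paddr_t
 *	foo_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc = ...;
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_nsegs, off, prot, 0));
 *	}
 */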

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, vm_offset_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
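
/*
 * Worked example of the boundary clipping above (illustrative
 * numbers): with _dm_boundary = 0x10000 and curaddr = 0x3f800,
 * bmask = ~0xffff = 0xffff0000, so
 *
 *	baddr = (0x3f800 + 0x10000) & 0xffff0000 = 0x40000
 *	sgsize is clipped to at most 0x40000 - 0x3f800 = 0x800
 *
 * which keeps the segment from straddling the 64KB boundary.
 */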

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, vm_offset_t low, vm_offset_t high)
{
	vm_offset_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
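
/*
 * Example (illustrative): if uvm_pglistalloc() returns pages at
 * 0x100000, 0x101000 and 0x200000 (PAGE_SIZE = 0x1000), the loop
 * above coalesces the first two into segs[0] = { 0x100000, 0x2000 }
 * and starts segs[1] = { 0x200000, 0x1000 }, giving *rsegs = 2.
 */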