/* $NetBSD: bus_dma.c,v 1.2 2001/09/10 21:19:35 chris Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>

int     _bus_dmamap_load_buffer __P((bus_dma_tag_t, bus_dmamap_t, void *,
            bus_size_t, struct proc *, int, vm_offset_t *, int *, int));
int     _bus_dma_inrange __P((bus_dma_segment_t *, int, bus_addr_t));
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
        bus_dma_tag_t t;
        bus_size_t size;
        int nsegments;
        bus_size_t maxsegsz;
        bus_size_t boundary;
        int flags;
        bus_dmamap_t *dmamp;
{
        struct arm32_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

#ifdef DEBUG_DMA
        printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
            t, size, nsegments, maxsegsz, boundary, flags);
#endif  /* DEBUG_DMA */

        /*
         * Allocate and initialize the DMA map.  The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct arm32_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
        if ((mapstore = malloc(mapsize, M_DMAMAP,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                return (ENOMEM);

        memset(mapstore, 0, mapsize);
        map = (struct arm32_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;

        *dmamp = map;
#ifdef DEBUG_DMA
        printf("dmamap_create:map=%p\n", map);
#endif  /* DEBUG_DMA */
        return (0);
}
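
/*
 * Illustrative sketch (not part of the original source): a driver normally
 * reaches this routine through the machine-independent bus_dmamap_create()
 * interface rather than calling it directly.  The "sc" softc, its fields
 * and the error handling below are hypothetical.
 *
 *      if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
 *          BUS_DMA_NOWAIT, &sc->sc_rxmap) != 0) {
 *              printf("%s: unable to create rx DMA map\n",
 *                  sc->sc_dev.dv_xname);
 *              return;
 *      }
 */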

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

#ifdef DEBUG_DMA
        printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif  /* DEBUG_DMA */
#ifdef DIAGNOSTIC
        if (map->dm_nsegs > 0)
                printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif  /* DIAGNOSTIC */
        free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
{
        vm_offset_t lastaddr;
        int seg, error;

#ifdef DEBUG_DMA
        printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
            t, map, buf, buflen, p, flags);
#endif  /* DEBUG_DMA */

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        seg = 0;
        error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
            &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
        }
#ifdef DEBUG_DMA
        printf("dmamap_load: error=%d\n", error);
#endif  /* DEBUG_DMA */
        return (error);
}
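
/*
 * Illustrative sketch (not part of the original source): typical use of
 * the machine-independent bus_dmamap_load() interface for a kernel buffer,
 * followed by the pre-transfer sync.  The identifiers "sc", "buf" and
 * "len" are hypothetical driver names.
 *
 *      if (bus_dmamap_load(sc->sc_dmat, sc->sc_xfermap, buf, len,
 *          NULL, BUS_DMA_NOWAIT) != 0)
 *              return (EAGAIN);
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_xfermap, 0, len,
 *          BUS_DMASYNC_PREWRITE);
 *
 * The device would then be programmed from the physical segments in
 * sc->sc_xfermap->dm_segs[0 .. dm_nsegs - 1].
 */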

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct mbuf *m0;
        int flags;
{
        vm_offset_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

#ifdef DEBUG_DMA
        printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
            t, map, m0, flags);
#endif  /* DEBUG_DMA */

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf: no packet header");
#endif  /* DIAGNOSTIC */

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
                    NULL, flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
        }
#ifdef DEBUG_DMA
        printf("dmamap_load_mbuf: error=%d\n", error);
#endif  /* DEBUG_DMA */
        return (error);
}
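
/*
 * Illustrative sketch (not part of the original source): a network driver
 * typically maps an outgoing packet (an mbuf chain with a packet header)
 * through the machine-independent bus_dmamap_load_mbuf() interface.  The
 * identifiers "sc" and "txmap" are hypothetical.
 *
 *      error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0, BUS_DMA_NOWAIT);
 *
 * EFBIG from the load means the chain needed more than _dm_segcnt
 * segments; the usual recovery is to copy the packet into a single
 * cluster (e.g. with m_copydata()) and retry the load.
 */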

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct uio *uio;
        int flags;
{
        vm_offset_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        caddr_t addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (caddr_t)iov[i].iov_base;

                error = _bus_dmamap_load_buffer(t, map, addr, minlen,
                    p, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
        }
        return (error);
}
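
/*
 * Illustrative sketch (not part of the original source): bus_dmamap_load_uio()
 * is normally used from a character-device read/write path where the data is
 * still described by a struct uio, possibly in user space.  The identifiers
 * below are hypothetical.
 *
 *      error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_iomap, uio,
 *          BUS_DMA_NOWAIT);
 *
 * For a UIO_USERSPACE uio the underlying pages must stay resident for the
 * lifetime of the mapping, which callers normally arrange by wiring the
 * buffer (as physio(9) does) before loading the map.
 */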

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_dma_segment_t *segs;
        int nsegs;
        bus_size_t size;
        int flags;
{

        panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

#ifdef DEBUG_DMA
        printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif  /* DEBUG_DMA */

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_addr_t offset;
        bus_size_t len;
        int ops;
{
        int loop;
        bus_addr_t vaddr;
        bus_size_t length;
        bus_dma_segment_t *seg;

#ifdef DEBUG_DMA
        printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
            t, map, offset, len, ops);
#endif  /* DEBUG_DMA */

        if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
                /* Quick exit if length is zero */
                if (len == 0)
                        return;

                /* Find the segment pointed to by offset */
                loop = map->dm_nsegs;
                seg = &map->dm_segs[0];
                while (offset >= seg->ds_len) {
                        offset -= seg->ds_len;
                        ++seg;
                        /* Got any more segments ? */
                        --loop;
                        if (loop == 0)
                                return;
                }

                /* Set the starting address and maximum length */
                vaddr = seg->_ds_vaddr + offset;
                length = seg->ds_len - offset;
                do {
                        /* Limit the length if not the whole segment */
                        if (len < length)
                                length = len;
#ifdef DEBUG_DMA
                        printf("syncing: %lx,%lx\n", vaddr, length);
#endif  /* DEBUG_DMA */
                        /* Actually sync the cache */
                        cpu_cache_purgeD_rng(vaddr, length);

                        /* Adjust the length */
                        len -= length;

                        /* sync complete ? */
                        if (len > 0) {
                                /* Got any more segments ? */
                                --loop;
                                if (loop == 0)
                                        return;
                                ++seg;
                                vaddr = seg->_ds_vaddr;
                                length = seg->ds_len;
                        }
                } while (len > 0);

                cpu_drain_writebuf();
        }
}
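
/*
 * Illustrative sketch (not part of the original source): the sync calls a
 * driver issues around a transfer.  PRE ops are issued before handing the
 * buffer to the device, POST ops after the transfer completes.  On this
 * implementation the POST ops happen to be no-ops, but portable drivers
 * must still issue them.  "sc" and "map" are hypothetical.
 *
 *      bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *          BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *      ... start the transfer and wait for it to complete ...
 *      bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *          BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 */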

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern vm_offset_t physical_start;
extern vm_offset_t physical_freestart;
extern vm_offset_t physical_freeend;
extern vm_offset_t physical_end;

int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
{
        int error;
#ifdef DEBUG_DMA
        printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
            t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif  /* DEBUG_DMA */
        error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, trunc_page(physical_start),
            trunc_page(physical_end)));
#ifdef DEBUG_DMA
        printf("dmamem_alloc: =%d\n", error);
#endif  /* DEBUG_DMA */
        return (error);
}
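
/*
 * Illustrative sketch (not part of the original source): the usual sequence
 * for obtaining DMA-safe control memory (for example a descriptor ring) is
 * alloc -> map -> create -> load.  All sizes and identifiers below are
 * hypothetical.
 *
 *      bus_dma_segment_t seg;
 *      int rseg;
 *      caddr_t kva;
 *
 *      if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
 *              BUS_DMA_NOWAIT) != 0 ||
 *          bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *              BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0 ||
 *          bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
 *              BUS_DMA_NOWAIT, &sc->sc_cdmap) != 0 ||
 *          bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, kva, PAGE_SIZE,
 *              NULL, BUS_DMA_NOWAIT) != 0)
 *              goto fail;
 */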

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

#ifdef DEBUG_DMA
        printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif  /* DEBUG_DMA */

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }
        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        size_t size;
        caddr_t *kvap;
        int flags;
{
        vm_offset_t va;
        bus_addr_t addr;
        int curseg;
        pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
        printf("dmamem_map: t=%p segs=%p nsegs=%x size=%x flags=%x\n",
            t, segs, nsegs, size, flags);
#endif  /* DEBUG_DMA */

        size = round_page(size);
        va = uvm_km_valloc(kernel_map, size);

        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;

        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
                        printf("wiring p%lx to v%lx", addr, va);
#endif  /* DEBUG_DMA */
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        pmap_enter(pmap_kernel(), va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                        /*
                         * If the memory must remain coherent with the
                         * cache then we must make the memory uncacheable
                         * in order to maintain virtual cache coherency.
                         * We must also guarantee that the cache does not
                         * already contain the virtual addresses we are
                         * making uncacheable.
                         */
                        if (flags & BUS_DMA_COHERENT) {
                                cpu_cache_purgeD_rng(va, NBPG);
                                cpu_drain_writebuf();
                                ptep = vtopte(va);
                                *ptep = ((*ptep) & ~(PT_C | PT_B));
                                tlb_flush();
                        }
#ifdef DEBUG_DMA
                        ptep = vtopte(va);
                        printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif  /* DEBUG_DMA */
                }
        }
        pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
        printf("dmamem_map: =%p\n", *kvap);
#endif  /* DEBUG_DMA */
        return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
        bus_dma_tag_t t;
        caddr_t kva;
        size_t size;
{

#ifdef DEBUG_DMA
        printf("dmamem_unmap: t=%p kva=%p size=%x\n", t, kva, size);
#endif  /* DEBUG_DMA */
#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif  /* DIAGNOSTIC */

        size = round_page(size);
        uvm_km_free(kernel_map, (vm_offset_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        off_t off;
        int prot, flags;
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif  /* DIAGNOSTIC */
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return (arm_byte_to_page((u_long)segs[i].ds_addr + off));
        }

        /* Page not found. */
        return (-1);
}
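
/*
 * Illustrative sketch (not part of the original source): a driver's mmap
 * entry point usually just forwards to the machine-independent
 * bus_dmamem_mmap() with the segments backing the exported memory.  The
 * softc fields below are hypothetical.
 *
 *      return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
 *          off, prot, 0));
 */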

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
        vm_offset_t *lastaddrp;
        int *segp;
        int first;
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;
        pmap_t pmap;

#ifdef DEBUG_DMA
        printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
            buf, buflen, flags, first);
#endif  /* DEBUG_DMA */

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0; ) {
                /*
                 * Get the physical address for this segment.
                 */
                (void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

                /*
                 * Make sure we're in an allowed DMA range.
                 */
                if (t->_ranges != NULL &&
                    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
                        return (EINVAL);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = NBPG - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        map->dm_segs[seg]._ds_vaddr = vaddr;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                                map->dm_segs[seg]._ds_vaddr = vaddr;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return (EFBIG);         /* XXX better return value here? */
        return (0);
}
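
/*
 * Worked example (not part of the original source; the numbers are
 * hypothetical) of the boundary clipping in _bus_dmamap_load_buffer()
 * above.  With a _dm_boundary of 0x800, bmask = ~0x7ff.  For a chunk
 * beginning at curaddr = 0x20600 with sgsize = 0xa00 (the rest of its
 * page):
 *
 *      baddr = (0x20600 + 0x800) & ~0x7ff = 0x20800
 *      baddr - curaddr = 0x200 < sgsize, so sgsize is clipped to 0x200
 *
 * The chunk therefore stops exactly at the 0x800-byte boundary; the
 * remainder of the page is handled on the next loop iteration, where it
 * starts a new dm_segs[] entry because it falls in the next boundary
 * window.
 */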

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(ranges, nranges, curaddr)
        bus_dma_segment_t *ranges;
        int nranges;
        bus_addr_t curaddr;
{
        bus_dma_segment_t *ds;
        int i;

        for (i = 0, ds = ranges; i < nranges; i++, ds++) {
                if (curaddr >= ds->ds_addr &&
                    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
                        return (1);
        }

        return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
        vm_offset_t low;
        vm_offset_t high;
{
        vm_offset_t curaddr, lastaddr;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error;

#ifdef DEBUG_DMA
        printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
            t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif  /* DEBUG_DMA */

        /* Always round the size. */
        size = round_page(size);

        /*
         * Allocate pages from the VM system.
         */
        TAILQ_INIT(&mlist);
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = mlist.tqh_first;
        curseg = 0;
        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
        printf("alloc: page %lx\n", lastaddr);
#endif  /* DEBUG_DMA */
        m = m->pageq.tqe_next;

        for (; m != NULL; m = m->pageq.tqe_next) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < low || curaddr >= high) {
                        printf("uvm_pglistalloc returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc_range");
                }
#endif  /* DIAGNOSTIC */
#ifdef DEBUG_DMA
                printf("alloc: page %lx\n", curaddr);
#endif  /* DEBUG_DMA */
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}