/*	$NetBSD: bus_dma.c,v 1.29 2008/02/03 08:34:57 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * bus_dma routines for vax.  File copied from arm32/bus_dma.c.
 * NetBSD: bus_dma.c,v 1.11 1998/09/21 22:53:35 thorpej Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.29 2008/02/03 08:34:57 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/ka43.h>
#include <machine/sid.h>

extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail;
int	_bus_dmamap_load_buffer __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, vaddr_t *, int *, bool));
int	_bus_dma_inrange __P((bus_dma_segment_t *, int, bus_addr_t));
int	_bus_dmamem_alloc_range __P((bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int, vaddr_t, vaddr_t));
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct vax_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct vax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct vax_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
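
/*
 * Minimal usage sketch (illustration only, not part of this file):
 * a driver would typically create a map like this at attach time.
 * The softc fields (sc_dmat, sc_dmamap) and the MAXPHYS sizing are
 * hypothetical assumptions, not anything this file defines.
 */
#if 0
	/* One map, up to MAXPHYS bytes in at most one contiguous segment. */
	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS,
	    0, BUS_DMA_WAITOK, &sc->sc_dmamap);
	if (error)
		return (error);
#endif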

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	/* Free with the same malloc type the map was allocated with. */
	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	vaddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
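
/*
 * Usage sketch (illustration only): loading a kernel buffer and
 * bracketing the transfer with syncs, as a portable driver would.
 * sc, sc_dmat and sc_dmamap are hypothetical driver state.
 */
#if 0
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
	    NULL, BUS_DMA_NOWAIT);
	if (error == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, buflen,
		    BUS_DMASYNC_PREWRITE);
		/* ... start the memory-to-device transfer ... */
	}
#endif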

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	vaddr_t lastaddr = 0;
	int seg, error;
	bool first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = false) {
		if (m->m_len == 0)
			continue;
#if 0
		/*
		 * Disabled fast path: use the physical address cached in
		 * the mbuf (or its external storage) when it is valid,
		 * instead of translating through the pmap below.
		 */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
#if 0
		case M_EXT|M_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr
			    + (m->m_data - m->m_ext.ext_buf);
#endif
#if 1
 have_addr:
#endif
			if (!first && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;
#if 1
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			lastaddr = m->m_paddr + M_BUFOFFSET(m)
			    + (m->m_data - M_BUFADDR(m));
			goto have_addr;
#endif
		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
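
/*
 * Usage sketch (illustration only): a network driver mapping an
 * outgoing packet chain.  sc, sc_dmat and the per-packet map are
 * hypothetical assumptions.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: linearize the chain and retry. */
	}
#endif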

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	vaddr_t lastaddr = 0;
	int seg, i, error;
	bool first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = false;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */
	/*
	 * The VAX cache snoops DMA traffic and stays coherent, so this
	 * routine is a no-op.
	 */
	return;
}
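
/*
 * Even though the sync is a no-op here, portable drivers must still
 * bracket DMA with bus_dmamap_sync() calls.  Sketch (illustration
 * only; sc and sc_dmat are hypothetical):
 */
#if 0
	/* Before the device writes into the buffer. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	/* ... device performs DMA ... */
	/* Before the CPU reads the freshly DMA'd data. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
#endif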

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, round_page(avail_start),
	    trunc_page(avail_end)));
	return (error);
}
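
/*
 * Usage sketch (illustration only): allocating and mapping a
 * DMA-safe buffer, e.g. for a descriptor ring.  The page-sized,
 * single-segment request is a hypothetical assumption.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg;
	void *kva;

	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    &kva, BUS_DMA_NOWAIT);
#endif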

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	void **kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * Special case (but common):
	 * If there is only one physical segment then the already-mapped
	 * virtual address is returned, since all physical memory is already
	 * in the beginning of kernel virtual memory.
	 */
	if (nsegs == 1) {
		*kvap = (void *)(segs[0].ds_addr | KERNBASE);
		/*
		 * KA43 (3100/m76) must have its DMA-safe memory accessed
		 * through DIAGMEM.  Remap it here.
		 */
		if (vax_boardtype == VAX_BTYP_43) {
			pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
			    (segs[0].ds_addr|KA43_DIAGMEM) + size,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		return 0;
	}
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			if (vax_boardtype == VAX_BTYP_43)
				addr |= KA43_DIAGMEM;
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	void *kva;
	size_t size;
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%x\n", t, kva, size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	/* Avoid freeing if not mapped (i.e. the direct-mapped case above). */
	if (kva < (void *)virtual_avail)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
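
/*
 * Teardown sketch matching the alloc/map sketch above (illustration
 * only): unmap, then free, in the reverse order of setup.
 */
#if 0
	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
#endif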

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct vmspace *vm;
	int flags;
	vaddr_t *lastaddrp;
	int *segp;
	bool first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

#if 0
		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);
#endif

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = false;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
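
/*
 * Worked example of the boundary clamp above (illustration only):
 * with _dm_boundary = 0x10000 (64KB), bmask = ~0xffff.  For
 * curaddr = 0x2fc00, baddr = (0x2fc00 + 0x10000) & ~0xffff = 0x30000,
 * so sgsize is clamped to 0x30000 - 0x2fc00 = 0x400 bytes, which
 * keeps the segment from straddling the 64KB line.
 */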

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(ranges, nranges, curaddr)
	bus_dma_segment_t *ranges;
	int nranges;
	bus_addr_t curaddr;
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	vaddr_t low;
	vaddr_t high;
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		/* Coalesce physically contiguous pages into one segment. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * "generic" DMA struct, nothing special.
 */
struct vax_bus_dma_tag vax_bus_dma_tag = {
	NULL,
	0,
	0,
	0,
	0,
	0,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
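
/*
 * Usage sketch (illustration only): bus front ends hand this tag to
 * their children, and drivers reach the functions above through the
 * bus_dma(9) wrappers.  The softc field name sc_dmat is a
 * conventional assumption, not something this file defines.
 */
#if 0
	sc->sc_dmat = &vax_bus_dma_tag;
	/* later: bus_dmamap_create(sc->sc_dmat, ...) and friends. */
#endif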