/*	$NetBSD: bus_dma.c,v 1.14 2002/07/28 17:54:05 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map. The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
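
/*
 * Example (illustrative only, not compiled): a driver normally creates its
 * map once at attach time through the machine-independent bus_dmamap_create()
 * entry point, which dispatches to the function above, and destroys it at
 * detach time.  The softc, its field names, and the sizes below are
 * hypothetical; only the bus_dma(9) calls are taken from this file's API.
 */
#if 0
	/* At attach: one map covering up to 64KB in at most 16 segments. */
	if (bus_dmamap_create(sc->sc_dmat, 65536, 16, 65536, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0) {
		printf("%s: unable to create DMA map\n", sc->sc_dev.dv_xname);
		return;
	}

	/* At detach: release the map (it must already be unloaded). */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
#endif	/* example */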

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;

	/* The map was allocated with M_DMAMAP; free it with the same type. */
	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer. May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
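
/*
 * Example (illustrative only, not compiled): loading a linear kernel buffer
 * for a memory-to-device transfer with the map created above.  The hardware
 * programming step is a placeholder; EXAMPLE_START_DMA() and the softc are
 * hypothetical, but the load/sync/unload sequence and flags are standard
 * bus_dma(9) usage backed by the functions in this file.
 */
#if 0
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
	    NULL, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Write the cache back before the device reads the buffer. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, buflen,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < sc->sc_dmamap->dm_nsegs; i++)
		EXAMPLE_START_DMA(sc, sc->sc_dmamap->dm_segs[i].ds_addr,
		    sc->sc_dmamap->dm_segs[i].ds_len);

	/* ... after the transfer completes ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, buflen,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
#endif	/* example */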

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
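
/*
 * Example (illustrative only, not compiled): a network driver's transmit
 * path maps an outgoing packet chain with bus_dmamap_load_mbuf(), which
 * walks every mbuf in the chain through _bus_dmamap_load_buffer() above.
 * The softc fields and EXAMPLE_TX_DESC() are hypothetical.
 */
#if 0
	if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m0);		/* too fragmented or out of segments */
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap, 0,
	    sc->sc_txmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	for (i = 0; i < sc->sc_txmap->dm_nsegs; i++)
		EXAMPLE_TX_DESC(sc, i, sc->sc_txmap->dm_segs[i].ds_addr,
		    sc->sc_txmap->dm_segs[i].ds_len);
#endif	/* example */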

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load. Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map. May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
}

static void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	/*
	 * "offset" is relative to the start of the original buffer and
	 * "len" is the length of the region to sync, so only the starting
	 * address needs adjusting.
	 */
	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
#if 1
		cpu_dcache_wbinv_range(addr, len);
#else
		cpu_dcache_inv_range(addr, len);
#endif
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;
	}
}

static void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0;
	     m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			cpu_dcache_wbinv_range(maddr, minlen);
#else
			cpu_dcache_inv_range(maddr, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}

static void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			cpu_dcache_wbinv_range(addr, minlen);
#else
			cpu_dcache_inv_range(addr, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA map synchronization. May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches. We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache. We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache. Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * XXX Skip cache frobbing if mapping was COHERENT.
	 */

	/*
	 * If the mapping is not the kernel's and also not the
	 * current process's (XXX actually, vmspace), then we
	 * don't have anything to do, since the cache is Wb-Inv'd
	 * on context switch.
	 *
	 * XXX REVISIT WHEN WE DO FCSE!
	 */
	if (__predict_false(map->_dm_proc != NULL && map->_dm_proc != curproc))
		return;

	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}
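
/*
 * Example (illustrative only, not compiled): the PRE/POST pairing the
 * function above expects, for a device-to-memory (read) transfer into a
 * loaded map.  On this VIVT write-back cache the PREREAD sync writes back
 * and invalidates the buffer's cache lines before the device overwrites
 * the memory, so the CPU cannot later push stale dirty lines on top of the
 * DMA'd data.  EXAMPLE_START_RX() and the softc are hypothetical.
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0,
	    sc->sc_rxmap->dm_mapsize, BUS_DMASYNC_PREREAD);
	EXAMPLE_START_RX(sc, sc->sc_rxmap->dm_segs[0].ds_addr,
	    sc->sc_rxmap->dm_segs[0].ds_len);

	/* ... in the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, 0,
	    sc->sc_rxmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	/* The CPU may now safely read the freshly DMA'd data. */
#endif	/* example */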

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;
#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif	/* DEBUG_DMA */
	error = (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, trunc_page(physical_start),
	    trunc_page(physical_end)));
#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif	/* DEBUG_DMA */
	return(error);
}

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, NBPG);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~(L2_B | L2_C);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
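
/*
 * Example (illustrative only, not compiled): the usual sequence for a DMA
 * control area such as a descriptor ring, using the allocator and mapping
 * functions above.  BUS_DMA_COHERENT asks _bus_dmamem_map() to disable
 * caching (clear L2_B/L2_C) so the CPU and the device see each other's
 * updates without explicit syncs.  The softc fields are hypothetical and
 * sc->sc_descmap is assumed to have been created with bus_dmamap_create().
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (caddr_t *)&sc->sc_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_descmap, sc->sc_desc,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, PAGE_SIZE);
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}
	/* sc->sc_descmap->dm_segs[0].ds_addr is what the device is given. */
#endif	/* example */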

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
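
/*
 * Example (illustrative only, not compiled): a driver's mmap handler
 * normally just forwards the page-aligned offset to bus_dmamem_mmap() and
 * returns the cookie, which the VM system hands to pmap_enter() one page
 * at a time.  The softc layout and function name are hypothetical.
 */
#if 0
paddr_t
example_mmap(struct example_softc *sc, off_t off, int prot)
{

	if (off < 0 || off >= sc->sc_buflen)
		return (-1);

	return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs, sc->sc_nsegs,
	    off, prot, 0));
}
#endif	/* example */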

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
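
/*
 * Example: the boundary clamp above can be checked outside the kernel with
 * a small standalone program (hypothetical values, not part of this file).
 * A chunk starting at physical 0x1ff800 with 0x1000 bytes left in its page
 * would cross the 64KB boundary at 0x200000, so it is clamped to 0x800
 * bytes and the remainder starts a new segment.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long boundary = 0x10000;	/* 64KB, power of two */
	unsigned long bmask = ~(boundary - 1);
	unsigned long curaddr = 0x1ff800;	/* physical address of chunk */
	unsigned long sgsize = 0x1000;		/* bytes left in this page */
	unsigned long baddr;

	baddr = (curaddr + boundary) & bmask;	/* next boundary: 0x200000 */
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;	/* clamped to 0x800 */

	printf("next boundary %#lx, sgsize %#lx\n", baddr, sgsize);
	return 0;
}
#endif	/* example */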

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(bus_dma_segment_t *ranges, int nranges, bus_addr_t curaddr)
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}
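
/*
 * Example (illustrative only, not compiled): a bus front-end that can only
 * reach the first 16MB of a DRAM window at a hypothetical 0xc0000000 would
 * describe that window as a one-entry ranges array in its tag;
 * _bus_dmamap_load_buffer() then rejects (EINVAL) any page falling outside
 * it.  The addresses and tag wiring are hypothetical.
 */
#if 0
	static bus_dma_segment_t example_ranges[1];

	example_ranges[0].ds_addr = 0xc0000000UL;	/* start of reachable RAM */
	example_ranges[0].ds_len = 16 * 1024 * 1024;	/* 16MB window */

	/* A page at 0xc2000000 lies outside the window: */
	if (_bus_dma_inrange(example_ranges, 1, 0xc2000000UL) == 0)
		printf("address not DMA'able from this bus\n");
#endif	/* example */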

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
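
/*
 * Example (illustrative only, not compiled): a bus-specific allocator for a
 * bus that cannot address all of RAM simply narrows the low/high window it
 * passes to _bus_dmamem_alloc_range() above.  The 16MB limit and the
 * function name are hypothetical; ISA-style front-ends on other ports
 * follow the same pattern.
 */
#if 0
int
example_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{

	/* Only the first 16MB of physical memory is reachable by this bus. */
	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, trunc_page(physical_start),
	    trunc_page(physical_start + 16 * 1024 * 1024)));
}
#endif	/* example */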