/*	$NetBSD: int_bus_dma.c,v 1.4 2002/01/25 19:19:29 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * The integrator board has memory steering hardware that means that
 * the normal physical addresses used by the processor cannot be used
 * for DMA.  Instead we have to use the "core module alias mapping
 * addresses".  We don't use these for normal processor accesses since
 * they are much slower than the direct addresses when accessing
 * memory on the local board.
 */
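
/*
 * For illustration only: the local<->alias translation itself is done by
 * the CM_ALIAS_TO_LOCAL() and LOCAL_TO_CM_ALIAS() macros pulled in from
 * int_bus_dma.h, which are the authoritative definitions.  A minimal
 * sketch of the idea, assuming a hypothetical single alias window that
 * differs from the local view only in the top address bits:
 *
 *	#define	CM_ALIAS_BASE		0x80000000UL	(hypothetical)
 *	#define	LOCAL_TO_CM_ALIAS(pa)	((pa) | CM_ALIAS_BASE)
 *	#define	CM_ALIAS_TO_LOCAL(pa)	((pa) & ~CM_ALIAS_BASE)
 */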

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <evbarm/integrator/int_bus_dma.h>

#include <machine/cpu.h>
#include <arm/cpufunc.h>
static int integrator_bus_dmamap_load_buffer __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    vm_offset_t *, int *, int));
static int integrator_bus_dma_inrange __P((bus_dma_segment_t *, int,
	    bus_addr_t));

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
integrator_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	vm_offset_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = integrator_bus_dmamap_load_buffer(t, map, buf, buflen, p,
	    flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
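
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * normally reaches the function above through the bus_dma(9) front-end
 * bus_dmamap_load(); "sc", "buf" and "len" below are hypothetical
 * driver locals.
 *
 *	bus_dmamap_t dmam;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, len, 1, len, 0,
 *	    BUS_DMA_NOWAIT, &dmam) == 0 &&
 *	    bus_dmamap_load(sc->sc_dmat, dmam, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, dmam, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		(program the device with dmam->dm_segs[0].ds_addr,
 *		 which is already a core module alias address)
 *	}
 */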

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
integrator_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	vm_offset_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("integrator_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = integrator_bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
integrator_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	vm_offset_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("integrator_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = integrator_bus_dmamap_load_buffer(t, map, addr,
		    minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern vm_offset_t physical_start;
extern vm_offset_t physical_freestart;
extern vm_offset_t physical_freeend;
extern vm_offset_t physical_end;

int
integrator_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs,
    rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags);
#endif	/* DEBUG_DMA */
	error = (integrator_bus_dmamem_alloc_range(t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags,
	    trunc_page(physical_start), trunc_page(physical_end)));
#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
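
/*
 * Usage sketch (illustrative only): the usual life cycle of DMA-safe
 * memory through the bus_dma(9) front-ends that land on the functions
 * in this file; "sc" is a hypothetical driver softc.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) == 0 &&
 *	    bus_dmamem_map(sc->sc_dmat, &seg, rseg, NBPG, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) == 0) {
 *		(use kva; seg.ds_addr is the core module alias address)
 *		bus_dmamem_unmap(sc->sc_dmat, kva, NBPG);
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *	}
 */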

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
integrator_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(CM_ALIAS_TO_LOCAL(addr));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
integrator_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vm_offset_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", CM_ALIAS_TO_LOCAL(addr),
			    va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("integrator_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, CM_ALIAS_TO_LOCAL(addr),
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, NBPG);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				/*
				 * Clear both the cacheable (PT_C) and
				 * bufferable (PT_B) bits so the page is
				 * truly uncached.
				 */
				*ptep = ((*ptep) & ~(PT_C | PT_B));
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
integrator_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("integrator_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("integrator_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("integrator_bus_dmamem_mmap: segment size not"
			    " multiple of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_byte_to_page(
		    (u_long)CM_ALIAS_TO_LOCAL(segs[i].ds_addr) + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
integrator_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp,
    segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	vm_offset_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("integrator_bus_dmamap_load_buffer(buf=%p, len=%lx, "
	    "flags=%d, 1st=%d)\n", buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    integrator_bus_dma_inrange(t->_ranges, t->_nranges,
		    curaddr) == 0)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr =
			    LOCAL_TO_CM_ALIAS(curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (LOCAL_TO_CM_ALIAS(curaddr) & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    LOCAL_TO_CM_ALIAS(curaddr);
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
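
/*
 * Illustration (sketch): how the mbuf and uio loaders above chain calls
 * through *lastaddrp and *segp; "b1"/"b2" and their lengths are
 * hypothetical buffers already mapped into the kernel pmap.
 *
 *	vm_offset_t lastaddr;
 *	int seg = 0, error;
 *
 *	error = integrator_bus_dmamap_load_buffer(t, map, b1, l1, NULL,
 *	    flags, &lastaddr, &seg, 1);		(first call: first == 1)
 *	if (error == 0)
 *		error = integrator_bus_dmamap_load_buffer(t, map, b2, l2,
 *		    NULL, flags, &lastaddr, &seg, 0);
 *	if (error == 0)
 *		map->dm_nsegs = seg + 1;	(b2 may have coalesced
 *						 into b1's last segment)
 */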

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static int
integrator_bus_dma_inrange(ranges, nranges, curaddr)
	bus_dma_segment_t *ranges;
	int nranges;
	bus_addr_t curaddr;
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= CM_ALIAS_TO_LOCAL(ds->ds_addr) &&
		    round_page(curaddr) <=
		    (CM_ALIAS_TO_LOCAL(ds->ds_addr) + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
integrator_bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
    nsegs, rsegs, flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	vm_offset_t low;
	vm_offset_t high;
{
	vm_offset_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags,
	    low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = LOCAL_TO_CM_ALIAS(lastaddr);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("integrator_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = LOCAL_TO_CM_ALIAS(curaddr);
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}