/* $NetBSD: bus_dma.c,v 1.27 1998/09/21 22:51:56 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_uvm.h"
#include "opt_pmap_new.h"

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.27 1998/09/21 22:51:56 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
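
/*
 * Illustrative sketch (hypothetical driver code, kept in a comment):
 * callers normally reach the routine above through bus_dmamap_create().
 * "sc", "sc_dmat", and "sc_dmamap" are assumed softc members, not part
 * of this file.  A single-segment 64KB map with no boundary restriction
 * might be created like this:
 *
 *	if (bus_dmamap_create(sc->sc_dmat, 65536, 1, 65536, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0) {
 *		printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
 *		return;
 *	}
 */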

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			curaddr = pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
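
/*
 * Worked example of the boundary clipping above (figures chosen for
 * illustration): with _dm_boundary == 0x10000, bmask is ~0xffff ==
 * 0xffff0000.  A chunk starting at curaddr == 0x2fff0 yields
 * baddr == (0x2fff0 + 0x10000) & 0xffff0000 == 0x30000, so sgsize is
 * clipped to 0x30000 - 0x2fff0 == 0x10 bytes; the segment ends at the
 * 64KB boundary and the remainder is handled on the next loop pass.
 */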

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
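
/*
 * Illustrative sketch (hypothetical driver code, kept in a comment):
 * this routine is normally reached through bus_dmamap_load(), and the
 * transfer is bracketed with bus_dmamap_sync().  "sc", "buf", and "len"
 * are assumed to come from the driver:
 *
 *	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *	    NULL, BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREREAD);
 *	(start the device; wait for the completion interrupt)
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */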

/*
 * Like _bus_dmamap_load_direct_common(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct_common: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct_common(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_direct_common: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct_common(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
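
/*
 * Illustrative sketch (hypothetical driver code, kept in a comment):
 * a sync may cover only part of a map, e.g. one 16-byte descriptor at
 * index "idx" within a ring mapped by an assumed "sc->sc_cmap":
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_cmap, idx * 16, 16,
 *	    BUS_DMASYNC_PREWRITE);
 */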

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start, avail_end;
	paddr_t curaddr, lastaddr, high;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
#if defined(UVM)
	error = uvm_pglistalloc(size, avail_start, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	error = vm_page_alloc_memory(size, avail_start, high,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
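
/*
 * Illustrative sketch (hypothetical driver code, kept in a comment):
 * allocating and mapping one page of DMA-safe control structures via
 * the public wrappers.  "sc" and "sc->sc_control" are assumptions:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0, &seg, 1,
 *	    &rseg, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, NBPG,
 *	    (caddr_t *)&sc->sc_control, BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 */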

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

#if defined(UVM)
	uvm_pglistfree(&mlist);
#else
	vm_page_free_memory(&mlist);
#endif
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

#if defined(UVM)
	va = uvm_km_valloc(kernel_map, size);
#else
	va = kmem_alloc_pageable(kernel_map, size);
#endif

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
#if defined(PMAP_NEW)
			pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE);
#else
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, TRUE);
#endif
		}
	}

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
#if defined(UVM)
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
#else
	kmem_free(kernel_map, (vaddr_t)kva, size);
#endif
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
int
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs, off, prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
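
/*
 * Illustrative sketch (hypothetical driver code, kept in a comment):
 * a character device's mmap entry point can delegate the page-number
 * computation to bus_dmamem_mmap().  "mydev_softc", "mydev_cd", and the
 * segment bookkeeping members are assumptions:
 *
 *	int
 *	mydev_mmap(dev, off, prot)
 *		dev_t dev;
 *		int off, prot;
 *	{
 *		struct mydev_softc *sc = mydev_cd.cd_devs[minor(dev)];
 *
 *		return (bus_dmamem_mmap(sc->sc_dmat, sc->sc_segs,
 *		    sc->sc_rseg, off, prot, 0));
 *	}
 */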