/* $NetBSD: bus_dma.c,v 1.52 2003/04/01 02:20:14 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>          /* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.52 2003/04/01 02:20:14 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int     _bus_dmamap_load_buffer_direct(bus_dma_tag_t,
            bus_dmamap_t, void *, bus_size_t, struct proc *, int,
            paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;  /* from pmap.c */

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct alpha_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map. The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct alpha_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
        if ((mapstore = malloc(mapsize, M_DMAMAP,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                return (ENOMEM);

        memset(mapstore, 0, mapsize);
        map = (struct alpha_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        if (t->_boundary != 0 && t->_boundary < boundary)
                map->_dm_boundary = t->_boundary;
        else
                map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;
        map->_dm_window = NULL;

        *dmamp = map;
        return (0);
}
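
/*
 * Worked example of the allocation above (illustrative only): with
 * nsegments == 8, the map is allocated as a single chunk of
 *
 *      sizeof(struct alpha_bus_dmamap) + 7 * sizeof(bus_dma_segment_t)
 *
 * bytes, since the structure already embeds the first bus_dma_segment_t.
 * A hypothetical driver might create such a map as sketched below; the
 * softc fields ("sc->sc_dmat", "sc->sc_dmamap") are assumptions for the
 * sake of the example and are not defined in this file:
 *
 *      error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 8, MAXPHYS,
 *          0, BUS_DMA_NOWAIT, &sc->sc_dmamap);
 *      if (error)
 *              return (error);
 */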

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

        free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (p != NULL)
                        (void) pmap_extract(p->p_vmspace->vm_map.pmap,
                            vaddr, &curaddr);
                else
                        curaddr = vtophys(vaddr);

                /*
                 * If we're beyond the current DMA window, indicate
                 * that and try to fall back into SGMAPs.
                 */
                if (t->_wsize != 0 && curaddr >= t->_wsize)
                        return (EINVAL);

                curaddr |= t->_wbase;

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;
                if (map->_dm_maxsegsz < sgsize)
                        sgsize = map->_dm_maxsegsz;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
                            curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0) {
                /*
                 * If there is a chained window, we will automatically
                 * fall back to it.
                 */
                return (EFBIG);         /* XXX better return value here? */
        }

        return (0);
}
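
/*
 * Illustrative arithmetic for the boundary clip above (the numbers are
 * made up for the example). With _dm_boundary == 0x10000, bmask is
 * ~0xffff. For curaddr == 0x1fc00 and an initial sgsize of 0x2000:
 *
 *      baddr  = (0x1fc00 + 0x10000) & ~0xffff = 0x20000
 *      sgsize = min(0x2000, 0x20000 - 0x1fc00) = 0x400
 *
 * so the chunk is trimmed to end exactly at the 64KB boundary, and the
 * remainder of the buffer is described by the next segment.
 */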

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer. Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        paddr_t lastaddr;
        int seg, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        seg = 0;
        error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
            p, flags, &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load(t->_next_window, map, buf, buflen,
                    p, flags);
        }
        return (error);
}
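
/*
 * A minimal sketch of how a driver typically drives the load path above,
 * assuming an already-created map and a kernel buffer "buf" of "len"
 * bytes; the names "sc->sc_dmat" and "sc->sc_dmamap" are assumptions for
 * illustration only:
 *
 *      if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
 *          NULL, BUS_DMA_NOWAIT) != 0)
 *              return (EAGAIN);
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *          BUS_DMASYNC_PREWRITE);
 *      ... start the DMA transfer and wait for it to complete ...
 *      bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *          BUS_DMASYNC_POSTWRITE);
 *      bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */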

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
        paddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                error = _bus_dmamap_load_buffer_direct(t, map,
                    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
        }
        return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
        paddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        caddr_t addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_bus_dmamap_load_uio_direct: "
                            "USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load. Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (caddr_t)iov[i].iov_base;

                error = _bus_dmamap_load_buffer_direct(t, map,
                    addr, minlen, p, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
        }
        return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

        panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map. May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_window = NULL;
}

/*
 * Common function for DMA map synchronization. May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

        /*
         * Flush the store buffer.
         */
        alpha_mb();
}

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

        return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
        paddr_t curaddr, lastaddr;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error;

        /* Always round the size. */
        size = round_page(size);

        /*
         * Allocate pages from the VM system.
         */
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = mlist.tqh_first;
        curseg = 0;
        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_len = PAGE_SIZE;
        m = m->pageq.tqe_next;

        for (; m != NULL; m = m->pageq.tqe_next) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < avail_start || curaddr >= high) {
                        printf("uvm_pglistalloc returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc");
                }
#endif
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}
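
/*
 * Illustrative example of the coalescing loop above (the addresses are
 * made up): if uvm_pglistalloc() hands back pages at physical addresses
 * 0x2000, 0x4000, 0x6000 and 0xa000 with PAGE_SIZE == 0x2000, the first
 * three pages are physically contiguous and collapse into one segment:
 *
 *      segs[0].ds_addr = 0x2000, segs[0].ds_len = 0x6000
 *      segs[1].ds_addr = 0xa000, segs[1].ds_len = 0x2000
 *
 * and *rsegs is set to 2.
 */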

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }

        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
        vaddr_t va;
        bus_addr_t addr;
        int curseg;

        /*
         * If we're only mapping 1 segment, use K0SEG, to avoid
         * TLB thrashing.
         */
        if (nsegs == 1) {
                *kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
                return (0);
        }

        size = round_page(size);

        va = uvm_km_valloc(kernel_map, size);

        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;

        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        pmap_enter(pmap_kernel(), va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
                }
        }
        pmap_update(pmap_kernel());

        return (0);
}
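
/*
 * A minimal sketch of how the allocation and mapping routines above are
 * typically paired in a hypothetical driver (the variable names here are
 * assumptions made for the example):
 *
 *      bus_dma_segment_t seg;
 *      int rseg, error;
 *      caddr_t kva;
 *
 *      error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *          &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *      if (error == 0)
 *              error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *                  &kva, BUS_DMA_NOWAIT);
 *
 * With a single segment the mapping comes straight out of K0SEG, so no
 * kernel virtual address space is consumed.
 */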

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif

        /*
         * Nothing to do if we mapped it with K0SEG.
         */
        if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
            kva <= (caddr_t)ALPHA_K0SEG_END)
                return;

        size = round_page(size);
        uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return (alpha_btop((caddr_t)segs[i].ds_addr + off));
        }

        /* Page not found. */
        return (-1);
}
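
/*
 * Illustrative walk of the offset logic above (the numbers are made up):
 * with two segments of 0x2000 bytes each and off == 0x3000, the first
 * iteration subtracts 0x2000 from the offset, and the second returns
 * alpha_btop(segs[1].ds_addr + 0x1000), i.e. the page frame number that
 * lies 0x1000 bytes into the second segment.
 */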