/*	$NetBSD: bus_dmamem_common.c,v 1.1 2012/10/02 23:39:43 christos Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dmamem_common.c,v 1.1 2012/10/02 23:39:43 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/bus.h>

#include <uvm/uvm.h>

#include <common/bus_dma/bus_dmamem_common.h>

/*
 * _bus_dmamem_alloc_range_common --
 *	Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
			       bus_size_t size,
			       bus_size_t alignment,
			       bus_size_t boundary,
			       bus_dma_segment_t *segs,
			       int nsegs,
			       int *rsegs,
			       int flags,
			       paddr_t low,
			       paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Allocate pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (__predict_false(error != 0))
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM system.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERT(curaddr >= low);
		KASSERT(curaddr < high);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
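
/*
 * Illustrative sketch (not part of the original file): a port's
 * machine-dependent _bus_dmamem_alloc() is typically a thin wrapper
 * that supplies the full range of DMA-able physical memory.  The
 * range limits shown here (0 .. 0xffffffff) are placeholders only;
 * a real port would use its own constants or pmap-provided bounds.
 */
#if 0
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags,
	    0,			/* low: port-specific */
	    0xffffffffUL));	/* high: port-specific */
}
#endif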

/*
 * _bus_dmamem_free_common --
 *	Free memory allocated with _bus_dmamem_alloc_range_common()
 *	back to the VM system.
 */
void
_bus_dmamem_free_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
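
/*
 * Illustrative sketch (not part of the original file): the matching
 * machine-dependent _bus_dmamem_free() usually just forwards to the
 * common routine, since no port-specific state is involved.
 */
#if 0
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}
#endif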

/*
 * _bus_dmamem_map_common --
 *	Map memory allocated with _bus_dmamem_alloc_range_common() into
 *	the kernel virtual address space.
 */
int
_bus_dmamem_map_common(bus_dma_tag_t t,
		       bus_dma_segment_t *segs,
		       int nsegs,
		       size_t size,
		       void **kvap,
		       int flags,
		       int pmapflags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			KASSERT(size != 0);
			/* XXX pmap_kenter_pa()? */
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    pmapflags | PMAP_WIRED |
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
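
/*
 * Illustrative sketch (not part of the original file): a port's
 * _bus_dmamem_map() passes any extra pmap flags it wants applied to
 * the kernel mapping via the "pmapflags" argument.  Whether
 * BUS_DMA_COHERENT should translate into an uncached mapping (e.g.
 * PMAP_NOCACHE) is port-specific; the flag used below is an assumption.
 */
#if 0
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags,
	    (flags & BUS_DMA_COHERENT) != 0 ? PMAP_NOCACHE : 0));
}
#endif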

/*
 * _bus_dmamem_unmap_common --
 *	Remove a mapping created with _bus_dmamem_map_common().
 */
void
_bus_dmamem_unmap_common(bus_dma_tag_t t,
			 void *kva,
			 size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	/* XXX pmap_kremove()?  See above... */
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
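
/*
 * Illustrative sketch (not part of the original file): the matching
 * machine-dependent _bus_dmamem_unmap() is normally a direct wrapper
 * around the common routine.
 */
#if 0
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	_bus_dmamem_unmap_common(t, kva, size);
}
#endif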

/*
 * _bus_dmamem_mmap_common --
 *	Mmap support for memory allocated with _bus_dmamem_alloc_range_common().
 */
bus_addr_t
_bus_dmamem_mmap_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs,
			off_t off,
			int prot,
			int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERT((off & PAGE_MASK) == 0);
		KASSERT((segs[i].ds_addr & PAGE_MASK) == 0);
		KASSERT((segs[i].ds_len & PAGE_MASK) == 0);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		/* XXX BUS_DMA_COHERENT */

		return (segs[i].ds_addr + off);
	}

	/* Page not found. */
	return ((bus_addr_t)-1);
}
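
/*
 * Illustrative sketch (not part of the original file): a port's
 * _bus_dmamem_mmap() converts the bus address returned by the common
 * routine into whatever cookie its mmap/pmap machinery expects.  Many
 * ports can return the value unchanged; any conversion shown here is
 * only a placeholder.
 */
#if 0
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return ((paddr_t)-1);

	return ((paddr_t)rv);	/* conversion, if any, is port-specific */
}
#endif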