/*	$NetBSD: xen_bus_dma.c,v 1.9 2007/02/24 21:19:25 bouyer Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.9 2007/02/24 21:19:25 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm_extern.h>

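/* Top of physical memory usable by UVM, set up during pmap bootstrap. */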
extern paddr_t avail_end;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
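
/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4KB pages):
 *	get_order(0x1000) == 0	(one page)
 *	get_order(0x1001) == 1	(rounds up to two pages)
 *	get_order(0x4000) == 2	(four pages)
 * In general, get_order(size) is the smallest n such that 2^n pages
 * cover size bytes.
 */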

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
#ifdef XEN3
	struct xen_memory_reservation res;
#endif

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size.  This will automagically
	 * handle "boundary", but the only way to enforce alignment
	 * is to request a memory region of size max(alignment, size).
	 */
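	/*
	 * Example (assuming 4KB pages): size = 8KB with alignment = 16KB
	 * gives order = max(1, 2) = 2, so four pages are requested and
	 * the two surplus pages are handed back to UVM further down.
	 */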
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(npages << PAGE_SHIFT, 0, avail_end, 0, 0,
	    mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
#ifdef XEN3
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res)
		    < 0) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed!\n");
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
		    &mfn, 1, 0) != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: MEMOP_decrease_reservation "
			    "failed!\n");
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
#endif
	}
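	/*
	 * At this point every machine frame that backed the allocation
	 * has been returned to the hypervisor and the corresponding P2M
	 * entries are invalid; the pseudo-physical pages still belong to
	 * us and are re-backed by the new extent below.
	 */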
	/* Get the new contiguous memory extent */
#ifdef XEN3
	res.extent_start = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
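	/*
	 * address_bits caps the machine address of the new extent:
	 * get_order(high) + PAGE_SHIFT is the number of bits needed to
	 * address "high", so the extent is allocated below that limit.
	 */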
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res) < 0) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
#else
	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
	    &mfn, 1, order) != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: MEMOP_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
#endif
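	/*
	 * The hypervisor returned a machine-contiguous extent of 2^order
	 * frames starting at mfn, so frame (mfn + i) backs the i-th
	 * pseudo-physical page below.
	 */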
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update((mfn+i) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase
	 * reservation.  If decrease_reservation failed, we have not yet
	 * given all pages back to Xen: free the remaining ones (from pg
	 * onwards) to UVM, then reclaim the pages already surrendered
	 * to Xen one by one.  If increase_reservation failed, pg is
	 * NULL, so only the reclaim step is needed.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.tqe_next;
#ifdef XEN3
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
		    &mfn, 1, 0) != 1) {
			printf("xen_alloc_contig: recovery "
			    "MEMOP_increase_reservation failed!\n");
			break;
		}
#endif
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update((mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return error;
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
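/*
 * Under Xen, UVM hands out pseudo-physical pages whose backing machine
 * frames need not be contiguous, nor fall within [low, high].  The
 * result of uvm_pglistalloc() is therefore checked against the
 * constraints in machine-address space and, when it fails, replaced by
 * a machine-contiguous extent from the hypervisor (_xen_alloc_contig).
 */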
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

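	/*
	 * Walk the remaining pages, extending the current segment while
	 * their machine addresses stay contiguous and starting a new one
	 * at each discontinuity; fall back to dorealloc if a boundary is
	 * crossed, alignment is violated, or we run out of segments.
	 */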
	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			curseg++;
			if (curseg >= nsegs || (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
#ifdef XEN3
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range\n");
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx >= high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
#else /* !XEN3 */
	/*
	 * If machine addresses are outside the allowed
	 * range we have to bail.  Xen2 doesn't offer an
	 * interface to get memory in a specific address
	 * range.
	 */
	printf("_xen_bus_dmamem_alloc_range: no way to "
	    "enforce address range\n");
	uvm_pglistfree(&mlist);
	return EINVAL;
#endif /* XEN3 */
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned "
		    "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or memory doesn't fit the
	 * constraints.  Free this memory and get a
	 * contiguous segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}
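#if 0
/*
 * Illustrative only (hence "#if 0"): a bus_dmamem_alloc() front-end
 * would typically call the range allocator along these lines, limiting
 * machine addresses to the 32-bit range most DMA engines can reach.
 * The function name and the 0xffffffff limit are assumptions, not part
 * of this file.
 */
static int
example_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{

	return _xen_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, 0xffffffff);
}
#endif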