/*	$NetBSD: xen_bus_dma.c,v 1.11 2008/06/04 12:41:42 ad Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.11 2008/06/04 12:41:42 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;
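/* avail_end: top of the physical memory managed by UVM, set at bootstrap */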

/* Pure 2^n version of get_order */
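/* e.g., assuming PAGE_SHIFT == 12: get_order(1) == 0, get_order(4096) == 0,
 * get_order(4097) == 1 and get_order(16384) == 2. */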
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
#ifdef XEN3
	struct xen_memory_reservation res;
#endif

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size.  This automagically
	 * handles "boundary", but the only way to enforce alignment
	 * is to request a memory region of size max(alignment, size).
	 */
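	/*
	 * Worked example (PAGE_SIZE 4096): size 8KB with alignment 16KB
	 * gives order = max(1, 2) = 2, so npages = 4 while npagesreq = 2;
	 * the two surplus pages go back to UVM once the extent is mapped.
	 */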
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(npages << PAGE_SHIFT, 0, avail_end, 0, 0,
	    mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

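	/*
	 * Give each page's machine frame back to Xen: save the current mfn,
	 * invalidate the P2M entry, then decrease the reservation.  The
	 * saved mfn lets us restore the entry if the hypercall fails.
	 */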
	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
#ifdef XEN3
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res)
		    < 0) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed!\n");
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
		    &mfn, 1, 0) != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: MEMOP_decrease_reservation "
			    "failed!\n");
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
#endif
	}
	/* Get the new contiguous memory extent */
#ifdef XEN3
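	/*
	 * Ask Xen for a single extent of 2^order machine-contiguous
	 * frames; address_bits bounds the machine address of the extent
	 * according to the caller's "high" constraint.
	 */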
	res.extent_start = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res) < 0) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
#else
	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
	    &mfn, 1, order) != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: MEMOP_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
#endif
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
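		/*
		 * Update both directions of the translation: the P2M
		 * array directly, and Xen's M2P table through the xpq
		 * update queue (flushed below).
		 */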
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update((mfn+i) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we haven't given all pages back
	 * to Xen yet; return them to UVM and get the missing pages back
	 * from Xen.
	 * If increase_reservation failed, pg is expected to be NULL and we
	 * just get the missing pages back from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
#ifdef XEN3
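		/* request one replacement frame below 4GB (address_bits = 32) */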
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
		    &mfn, 1, 0) != 1) {
			printf("xen_alloc_contig: recovery "
			    "MEMOP_increase_reservation failed!\n");
			break;
		}
#endif
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update((mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
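/*
 * Typical call (a sketch; the tag "t" and the sizes are illustrative only):
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	error = _xen_bus_dmamem_alloc_range(t, 65536, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT, 0, avail_end);
 */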
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
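			/*
			 * boundary is a power of two, so with consecutive
			 * pages the (addr & boundary) bit flips exactly
			 * when a boundary multiple is crossed.
			 */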
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			curseg++;
			if (curseg >= nsegs || (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
#ifdef XEN3
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range\n");
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("_xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("_xen_bus_dmamem_alloc_range 1");
#else /* !XEN3 */
	/*
	 * If machine addresses are outside the allowed
	 * range we have to bail.  Xen2 doesn't offer an
	 * interface to get memory in a specific address
	 * range.
	 */
	printf("_xen_bus_dmamem_alloc_range: no way to "
	    "enforce address range\n");
	uvm_pglistfree(&mlist);
	return EINVAL;
#endif /* XEN3 */
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the
	 * constraints.  Free this memory and get a contiguous
	 * segment from the hypervisor instead.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}