xen_bus_dma.c revision 1.6.16.4 1 1.6.16.4 yamt /* $NetBSD: xen_bus_dma.c,v 1.6.16.4 2007/02/26 09:08:56 yamt Exp $ */
2 1.6.16.2 yamt /* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
3 1.6.16.2 yamt
4 1.6.16.2 yamt /*-
5 1.6.16.2 yamt * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
6 1.6.16.2 yamt * All rights reserved.
7 1.6.16.2 yamt *
8 1.6.16.2 yamt * This code is derived from software contributed to The NetBSD Foundation
9 1.6.16.2 yamt * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
10 1.6.16.2 yamt * Simulation Facility, NASA Ames Research Center.
11 1.6.16.2 yamt *
12 1.6.16.2 yamt * Redistribution and use in source and binary forms, with or without
13 1.6.16.2 yamt * modification, are permitted provided that the following conditions
14 1.6.16.2 yamt * are met:
15 1.6.16.2 yamt * 1. Redistributions of source code must retain the above copyright
16 1.6.16.2 yamt * notice, this list of conditions and the following disclaimer.
17 1.6.16.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
18 1.6.16.2 yamt * notice, this list of conditions and the following disclaimer in the
19 1.6.16.2 yamt * documentation and/or other materials provided with the distribution.
20 1.6.16.2 yamt * 3. All advertising materials mentioning features or use of this software
21 1.6.16.2 yamt * must display the following acknowledgement:
22 1.6.16.2 yamt * This product includes software developed by the NetBSD
23 1.6.16.2 yamt * Foundation, Inc. and its contributors.
24 1.6.16.2 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
25 1.6.16.2 yamt * contributors may be used to endorse or promote products derived
26 1.6.16.2 yamt * from this software without specific prior written permission.
27 1.6.16.2 yamt *
28 1.6.16.2 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29 1.6.16.2 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30 1.6.16.2 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 1.6.16.2 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32 1.6.16.2 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 1.6.16.2 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 1.6.16.2 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 1.6.16.2 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 1.6.16.2 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 1.6.16.2 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 1.6.16.2 yamt * POSSIBILITY OF SUCH DAMAGE.
39 1.6.16.2 yamt */
40 1.6.16.2 yamt
41 1.6.16.2 yamt #include <sys/cdefs.h>
42 1.6.16.4 yamt __KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.6.16.4 2007/02/26 09:08:56 yamt Exp $");
43 1.6.16.2 yamt
44 1.6.16.2 yamt #include <sys/param.h>
45 1.6.16.2 yamt #include <sys/systm.h>
46 1.6.16.2 yamt #include <sys/kernel.h>
47 1.6.16.2 yamt #include <sys/malloc.h>
48 1.6.16.2 yamt #include <sys/mbuf.h>
49 1.6.16.2 yamt #include <sys/proc.h>
50 1.6.16.2 yamt
51 1.6.16.2 yamt #include <machine/bus.h>
52 1.6.16.2 yamt #include <machine/bus_private.h>
53 1.6.16.2 yamt
54 1.6.16.2 yamt #include <uvm/uvm_extern.h>
55 1.6.16.2 yamt
56 1.6.16.2 yamt extern paddr_t avail_end;
57 1.6.16.2 yamt
58 1.6.16.2 yamt /* Pure 2^n version of get_order */
59 1.6.16.2 yamt static inline int get_order(unsigned long size)
60 1.6.16.2 yamt {
61 1.6.16.2 yamt int order = -1;
62 1.6.16.2 yamt size = (size - 1) >> (PAGE_SHIFT - 1);
63 1.6.16.2 yamt do {
64 1.6.16.2 yamt size >>= 1;
65 1.6.16.2 yamt order++;
66 1.6.16.2 yamt } while (size);
67 1.6.16.2 yamt return order;
68 1.6.16.2 yamt }
69 1.6.16.2 yamt
/*
 * Trade the (possibly discontiguous in machine space) pages in mlistp
 * for a single machine-contiguous extent from the hypervisor, remapping
 * the same physical pages onto the new machine frames.
 *
 * Returns 0 on success with mlistp trimmed to the requested number of
 * pages; on failure returns an errno after attempting to restore the
 * p2m state and give all pages back to UVM/Xen.
 *
 * NOTE(review): "low" is currently unused here; only "high" constrains
 * the extent via address_bits — confirm callers rely on that.
 */
static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
#ifdef XEN3
	struct xen_memory_reservation res;
#endif

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size.  This will automagically
	 * handle "boundary", but the only way to enforce alignment
	 * is to request a memory region of size max(alignment, size).
	 */
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);		/* pages asked of Xen (power of 2) */
	npagesreq = (size >> PAGE_SHIFT); /* pages the caller actually needs */
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(npages << PAGE_SHIFT, 0, avail_end, 0, 0,
	    mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Release each page's machine frame to Xen one at a time.  The
	 * p2m entry is invalidated first; on hypercall failure it is
	 * restored before jumping to the recovery path.
	 */
	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
#ifdef XEN3
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res)
		    < 0) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed!\n");
#endif
			/* undo the p2m invalidation for this page */
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;

			error = ENOMEM;
			goto failed;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
		    &mfn, 1, 0) != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: MEMOP_decrease_reservation "
			    "failed!\n");
#endif
			/* undo the p2m invalidation for this page */
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
#endif
	}
	/*
	 * Get the new contiguous memory extent: a single 2^order extent
	 * whose base machine frame comes back in mfn.
	 */
#ifdef XEN3
	res.extent_start = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
	/* constrain the extent to lie below "high" */
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res) < 0) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		/* pg == NULL tells the recovery code all pages went to Xen */
		pg = NULL;
		goto failed;
	}
#else
	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
	    &mfn, 1, order) != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: MEMOP_increase_reservation "
		    "failed!\n");
#endif
		error = ENOMEM;
		/* pg == NULL tells the recovery code all pages went to Xen */
		pg = NULL;
		goto failed;
	}
#endif
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update((mfn+i) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq);
			uvm_pagefree(pg);
		}

	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we don't have given all pages
	 * back to Xen; give them back to UVM, and get the missing pages
	 * from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we just
	 * get back the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM (those not yet handed to Xen) */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.tqe_next;
#ifdef XEN3
		res.extent_start = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			/* out of luck: the page stays lost to this domain */
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
#else
		if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
		    &mfn, 1, 0) != 1) {
			/* out of luck: the page stays lost to this domain */
			printf("xen_alloc_contig: recovery "
			    "MEMOP_increase_reservation failed!\n");
			break;
		}
#endif
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update((mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return error;
}
237 1.6.16.2 yamt
238 1.6.16.2 yamt
239 1.6.16.2 yamt /*
240 1.6.16.2 yamt * Allocate physical memory from the given physical address range.
241 1.6.16.2 yamt * Called by DMA-safe memory allocation methods.
242 1.6.16.2 yamt * We need our own version to deal with physical vs machine addresses.
243 1.6.16.2 yamt */
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses:
 * pages that are physically contiguous (UVM's view) need not be
 * machine-contiguous, which is what DMA actually requires.
 *
 * Strategy: first try a plain uvm_pglistalloc(); if the resulting
 * machine addresses violate the segment count, alignment, boundary,
 * or [low, high) constraints, free the pages and retry exactly once
 * via _xen_alloc_contig(), which gets a machine-contiguous extent
 * from the hypervisor ("doingrealloc" enforces the single retry).
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;	/* set once we've fallen back to Xen */

	/* Always round the size. */
	size = round_page(size);

	/* alignment and boundary must be powers of two */
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.  All checks below are on machine
	 * (bus) addresses, not physical addresses.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			/* machine-contiguous: extend the current segment */
			segs[curseg].ds_len += PAGE_SIZE;
			/* but never let a segment straddle "boundary" */
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			/* discontiguity: start a new segment */
			curseg++;
			if (curseg >= nsegs || (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
#ifdef XEN3
	if (doingrealloc == 0)
		goto dorealloc;
	/*
	 * _xen_alloc_contig() already ran and still gave us an address
	 * outside [low, high).
	 */
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range\n");
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	/*
	 * >= high should be impossible: _xen_alloc_contig constrained
	 * the extent with address_bits, so this is a hard bug.
	 */
	printf("xen_bus_dmamem_alloc_range: "
	    "curraddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
#else /* !XEN3 */
	/*
	 * If machine addresses are outside the allowed
	 * range we have to bail. Xen2 doesn't offer an
	 * interface to get memory in a specific address
	 * range.
	 */
	printf("_xen_bus_dmamem_alloc_range: no way to "
	    "enforce address range\n");
	uvm_pglistfree(&mlist);
	return EINVAL;
#endif /* XEN3 */
dorealloc:
	/* the contiguous extent from Xen must satisfy all constraints */
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned "
		    "too much segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or memory doesn't fit
	 * constraints. Free this memory and
	 * get a contiguous segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}
358