/*	$NetBSD: xen_shm_machdep.c,v 1.5 2009/03/16 06:17:20 cegger Exp $	*/

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.5 2009/03/16 06:17:20 cegger Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/vmem.h>
#include <sys/kernel.h>
#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/evtchn.h>
#include <xen/xen_shm.h>

/*
 * Helper routines for the backend drivers.  These implement the necessary
 * functions to map a bunch of pages from foreign domains into our kernel VM
 * space, do I/O to them, and unmap them.
 *
 * At boot time, we grab some kernel VM space that we'll use to map the
 * foreign pages.  We also maintain a virtual-to-machine mapping table to
 * give back the appropriate address to bus_dma if requested.
 * If no more VM space is available, we return an error.  The caller can then
 * register a callback which will be called when the required VM space is
 * available.
 */
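
/*
 * Illustrative usage sketch for the XEN3 variants below; the caller-side
 * names (error, nsegs, domid, grefs, mydriver_retry, sc) are hypothetical,
 * not part of any API:
 *
 *	vaddr_t va;
 *	grant_handle_t handles[XENSHM_MAX_PAGES_PER_REQUEST];
 *
 *	error = xen_shm_map(nsegs, domid, grefs, &va, handles, 0);
 *	if (error == ENOMEM)
 *		error = xen_shm_callback(mydriver_retry, sc);
 *	else if (error == 0) {
 *		(do I/O through va)
 *		xen_shm_unmap(va, nsegs, handles);
 *	}
 */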

/* pointers to our VM space */
static vaddr_t xen_shm_base_address;
static u_long xen_shm_base_address_pg;
static vaddr_t xen_shm_end_address;

/* Grab enough VM space to map an entire vbd ring. */
#ifdef XEN3
/* Xen3 Linux guests seem to eat more pages; give enough for 10 vbd rings. */
#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)
#else
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1))
#endif

static vsize_t xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);
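
/*
 * For scale (illustrative, assuming 4 KiB pages and the usual Xen3 blkif
 * constants of a 32-entry ring and 11 segments per request; the exact
 * figures come from the headers): the XEN3 case works out to
 * 32 * (11 + 1) * 10 = 3840 pages, i.e. 15 MiB of VA space.
 */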

/* vm space management */
static vmem_t *xen_shm_arena;

/* callbacks are registered in a FIFO list. */

static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
    xen_shm_callbacks;
struct xen_shm_callback_entry {
	SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
	int (*xshmc_callback)(void *); /* our callback */
	void *xshmc_arg; /* cookie passed to the callback */
};
/* a pool of struct xen_shm_callback_entry */
static struct pool xen_shm_callback_pool;

#ifdef DEBUG
/* for ratecheck(9) */
static struct timeval xen_shm_errintvl = { 60, 0 };  /* a minute, each */
#endif

void
xen_shm_init(void)
{
	SIMPLEQ_INIT(&xen_shm_callbacks);
	pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
	    0, 0, 0, "xshmc", NULL, IPL_VM);
	/* ensure we'll always get items */
	if (pool_prime(&xen_shm_callback_pool,
	    PAGE_SIZE / sizeof(struct xen_shm_callback_entry)) != 0) {
		panic("xen_shm_init can't prime pool");
	}

	xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
	    UVM_KMF_VAONLY);
	xen_shm_end_address = xen_shm_base_address + xen_shm_size;
	xen_shm_base_address_pg = xen_shm_base_address >> PAGE_SHIFT;
	if (xen_shm_base_address == 0) {
		panic("xen_shm_init no VM space");
	}
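	/*
	 * The arena manages this space in units of whole pages: its base
	 * and the addresses it hands out are page numbers, its quantum is 1.
	 */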
	xen_shm_arena = vmem_create("xen_shm",
	    xen_shm_base_address_pg,
	    (xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
	    1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
	if (xen_shm_arena == NULL) {
		panic("xen_shm_init no arena");
	}
}

int
#ifdef XEN3
xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
    grant_handle_t *handlep, int flags)
#else
xen_shm_map(paddr_t *ma, int nentries, int domid, vaddr_t *vap, int flags)
#endif
{
	int s, i;
	vaddr_t new_va;
	u_long new_va_pg;
#ifdef XEN3
	int err;
	gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
#else
	multicall_entry_t mcl[XENSHM_MAX_PAGES_PER_REQUEST];
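	/* PTE bits: valid and writable, with used/modified preset */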
	int remap_prot = PG_V | PG_RW | PG_U | PG_M;
#endif

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_map: %d entries\n", nentries);
		panic("xen_shm_map");
	}
#endif
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
	/*
	 * If a driver is waiting for resources, don't try to allocate
	 * yet.  This is to avoid a flood of small requests stalling large
	 * ones.
	 */
	if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
	    (flags & XSHM_CALLBACK) == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM1\n");
#endif
		return ENOMEM;
	}
	/* allocate the needed virtual space */
	new_va_pg = vmem_alloc(xen_shm_arena, nentries,
	    VM_INSTANTFIT | VM_NOSLEEP);
	if (new_va_pg == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM\n");
#endif
		return ENOMEM;
	}
	splx(s);

	new_va = new_va_pg << PAGE_SHIFT;
#ifdef XEN3
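	/*
	 * Build one grant-table map op per page: the hypervisor maps the
	 * page that domain 'domid' granted via grefp[i] at host_addr in
	 * our kernel address space.
	 */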
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = new_va + i * PAGE_SIZE;
		op[i].dom = domid;
		op[i].ref = grefp[i];
		op[i].flags = GNTMAP_host_map |
		    ((flags & XSHM_RO) ? GNTMAP_readonly : 0);
	}
	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
	if (__predict_false(err))
		panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
	for (i = 0; i < nentries; i++) {
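		/*
		 * XXX if an entry failed, the VA allocated above and any
		 * grants already mapped are not released before returning.
		 */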
		if (__predict_false(op[i].status))
			return op[i].status;
		handlep[i] = op[i].handle;
	}
#else /* !XEN3 */
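	/*
	 * Xen2 path: batch one update_va_mapping_otherdomain call per page
	 * into a single multicall.  On this ABI args[0] is a page number,
	 * and each call's return value comes back in args[5], checked below.
	 */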
	for (i = 0; i < nentries; i++, new_va_pg++) {
		mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
		mcl[i].args[0] = new_va_pg;
		mcl[i].args[1] = ma[i] | remap_prot;
		mcl[i].args[2] = 0;
		mcl[i].args[3] = domid;
	}
	if (HYPERVISOR_multicall(mcl, nentries) != 0)
		panic("xen_shm_map: HYPERVISOR_multicall");

	for (i = 0; i < nentries; i++) {
		if (mcl[i].args[5] != 0) {
			printf("xen_shm_map: mcl[%d] failed\n", i);
			xen_shm_unmap(new_va, ma, nentries, domid);
			return EINVAL;
		}
	}
#endif /* !XEN3 */
	*vap = new_va;
	return 0;
}

void
#ifdef XEN3
xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
#else
xen_shm_unmap(vaddr_t va, paddr_t *pa, int nentries, int domid)
#endif
{
#ifdef XEN3
	gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
	int ret;
#else
	multicall_entry_t mcl[XENSHM_MAX_PAGES_PER_REQUEST];
#endif
	int i;
	int s;
	struct xen_shm_callback_entry *xshmc;

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_unmap: %d entries\n", nentries);
		panic("xen_shm_unmap");
	}
#endif

#ifdef XEN3
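	/* Build one unmap op per page, identified by its grant handle. */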
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = va + i * PAGE_SIZE;
		op[i].dev_bus_addr = 0;
		op[i].handle = handlep[i];
	}
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    op, nentries);
	if (__predict_false(ret))
		panic("xen_shm_unmap: unmap failed");
	va = va >> PAGE_SHIFT;
#else /* !XEN3 */
	va = va >> PAGE_SHIFT;
	for (i = 0; i < nentries; i++) {
		mcl[i].op = __HYPERVISOR_update_va_mapping;
		mcl[i].args[0] = va + i;
		mcl[i].args[1] = 0;
		mcl[i].args[2] = 0;
	}
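	/* Flush the TLB once, on the last entry, rather than per page. */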
	mcl[nentries - 1].args[2] = UVMF_FLUSH_TLB;
	if (HYPERVISOR_multicall(mcl, nentries) != 0)
		panic("xen_shm_unmap");
#endif /* !XEN3 */
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
	vmem_free(xen_shm_arena, va, nentries);
	while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
	    != NULL)) {
		SIMPLEQ_REMOVE_HEAD(&xen_shm_callbacks, xshmc_entries);
		splx(s);
		if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
			/* callback succeeded */
			s = splvm();
			pool_put(&xen_shm_callback_pool, xshmc);
		} else {
			/* callback failed, probably out of resources */
			s = splvm();
			SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
			    xshmc_entries);
			break;
		}
	}
	splx(s);
}

int
xen_shm_callback(int (*callback)(void *), void *arg)
{
	struct xen_shm_callback_entry *xshmc;
	int s;

	s = splvm();
	xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
	if (xshmc == NULL) {
		splx(s);
		return ENOMEM;
	}
	xshmc->xshmc_arg = arg;
	xshmc->xshmc_callback = callback;
	SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc, xshmc_entries);
	splx(s);
	return 0;
}
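
/*
 * Sketch of a callback a backend might register when xen_shm_map()
 * returns ENOMEM; the names (mydriver_retry, mydriver_start_io, the
 * softc) are hypothetical.  Returning 0 removes the entry from the
 * queue; non-zero keeps it queued (and stops the drain) until a later
 * xen_shm_unmap() frees more VA space:
 *
 *	static int
 *	mydriver_retry(void *arg)
 *	{
 *		struct mydriver_softc *sc = arg;
 *
 *		return mydriver_start_io(sc);	(retries xen_shm_map)
 *	}
 */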