/*	$NetBSD: x86_xpmap.c,v 1.1.2.3 2007/10/26 13:46:51 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2006 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /*
34 *
35 * Copyright (c) 2004 Christian Limpach.
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by Christian Limpach.
49 * 4. The name of the author may not be used to endorse or promote products
50 * derived from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
53 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
54 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
55 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
56 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
57 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
61 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 */
63
64
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.1.2.3 2007/10/26 13:46:51 bouyer Exp $");
67
68 #include "opt_xen.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72
73 #include <uvm/uvm.h>
74
75 #include <machine/gdt.h>
76 #include <xen/xenfunc.h>
77
78 #undef XENDEBUG
79 /* #define XENDEBUG_SYNC */
80 /* #define XENDEBUG_LOW */
81
82 #ifdef XENDEBUG
83 #define XENPRINTF(x) printf x
84 #define XENPRINTK(x) printk x
85 #define XENPRINTK2(x) /* printk x */
86
87 static char XBUF[256];
88 #else
89 #define XENPRINTF(x)
90 #define XENPRINTK(x)
91 #define XENPRINTK2(x)
92 #endif
93 #define PRINTF(x) printf x
94 #define PRINTK(x) printk x
95
/* Shared-info page and saved start_info; populated by early Xen bootstrap. */
volatile shared_info_t *HYPERVISOR_shared_info;
union start_info_union start_info_union;

void xen_failsafe_handler(void);

/*
 * Xen3 added a domain-id argument to HYPERVISOR_mmu_update(); hide the
 * difference behind HYPERVISOR_mmu_update_self(), which always targets
 * the calling domain.
 */
#ifdef XEN3
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
#else
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count))
#endif
108
/*
 * Callback registered with the hypervisor; invoked when Xen detects a
 * fault it cannot hand back to the guest.  Unrecoverable, so panic.
 */
void
xen_failsafe_handler(void)
{

	/* panic() supplies its own newline; no trailing '\n' needed. */
	panic("xen_failsafe_handler called!");
}
115
116
#ifndef __x86_64__
/*
 * Update one (GDT/LDT) descriptor slot through the hypervisor.
 * The slot's kernel VA is translated to a physical address via its PTE,
 * then handed to HYPERVISOR_update_descriptor together with the raw
 * 64-bit descriptor contents.
 */
void
xen_update_descriptor(union descriptor *table, union descriptor *entry)
{
	paddr_t pa;
	pt_entry_t *ptp;

	/* physical address of the slot = frame from the PTE + page offset */
	ptp = kvtopte((vaddr_t)table);
	pa = (*ptp & PG_FRAME) | ((vaddr_t)table & ~PG_FRAME);
	if (HYPERVISOR_update_descriptor(pa, entry->raw[0], entry->raw[1]))
		panic("HYPERVISOR_update_descriptor failed\n");
}
#endif
130
/*
 * Install an LDT for the current domain.  Every page backing the table
 * first has PG_RW cleared (presumably the hypervisor refuses writable
 * descriptor-table mappings -- TODO confirm against Xen interface docs),
 * then the MMUEXT_SET_LDT request is issued and flushed at splvm().
 */
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp, *maptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		/* machine address of the PTE, as obtained via vtomach() */
		maptp = (pt_entry_t *)vtomach((vaddr_t)ptp);
		XENPRINTF(("xen_set_ldt %p %d %p %p\n", (void *)base,
		    entries, ptp, maptp));
		PTE_CLEARBITS(ptp, maptp, PG_RW);
	}
	s = splvm();
	PTE_UPDATES_FLUSH();

	xpq_queue_set_ldt(base, entries);
	xpq_flush_queue();
	splx(s);
}
160
#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

/*
 * MMU updates are batched in xpq_queue and handed to the hypervisor in
 * one multi-entry hypercall by xpq_flush_queue().  xpq_idx is the index
 * of the next free slot.
 */
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
168
169 void
170 xpq_flush_queue()
171 {
172 int i, ok;
173
174 XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
175 for (i = 0; i < xpq_idx; i++)
176 XENPRINTK2(("%d: %p %08x\n", i, (u_int)xpq_queue[i].ptr,
177 (u_int)xpq_queue[i].val));
178 if (xpq_idx != 0 &&
179 HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0)
180 panic("HYPERVISOR_mmu_update failed\n");
181 xpq_idx = 0;
182 }
183
184 static inline void
185 xpq_increment_idx(void)
186 {
187
188 xpq_idx++;
189 if (__predict_false(xpq_idx == XPQUEUE_SIZE))
190 xpq_flush_queue();
191 }
192
193 void
194 xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
195 {
196 XENPRINTK2(("xpq_queue_machphys_update ma=%p pa=%p\n", (void *)ma, (void *)pa));
197 xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
198 xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
199 xpq_increment_idx();
200 #ifdef XENDEBUG_SYNC
201 xpq_flush_queue();
202 #endif
203 }
204
205 void
206 xpq_queue_pde_update(pd_entry_t *ptr, pd_entry_t val)
207 {
208
209 KASSERT(((paddr_t)ptr & 3) == 0);
210 xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
211 xpq_queue[xpq_idx].val = val;
212 xpq_increment_idx();
213 #ifdef XENDEBUG_SYNC
214 xpq_flush_queue();
215 #endif
216 }
217
218 void
219 xpq_queue_pte_update(pt_entry_t *ptr, pt_entry_t val)
220 {
221
222 KASSERT(((paddr_t)ptr & 3) == 0);
223 xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
224 xpq_queue[xpq_idx].val = val;
225 xpq_increment_idx();
226 #ifdef XENDEBUG_SYNC
227 xpq_flush_queue();
228 #endif
229 }
230
231 #ifdef XEN3
232 void
233 xpq_queue_pt_switch(paddr_t pa)
234 {
235 struct mmuext_op op;
236 xpq_flush_queue();
237
238 XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
239 op.cmd = MMUEXT_NEW_BASEPTR;
240 op.arg1.mfn = pa >> PAGE_SHIFT;
241 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
242 panic("xpq_queue_pt_switch");
243 }
244
245 void
246 xpq_queue_pin_table(paddr_t pa)
247 {
248 struct mmuext_op op;
249 xpq_flush_queue();
250
251 XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
252 op.arg1.mfn = pa >> PAGE_SHIFT;
253
254 #ifdef __x86_64__
255 op.cmd = MMUEXT_PIN_L4_TABLE;
256 #else
257 op.cmd = MMUEXT_PIN_L2_TABLE;
258 #endif
259 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
260 panic("xpq_queue_pin_table");
261 }
262
263 void
264 xpq_queue_unpin_table(paddr_t pa)
265 {
266 struct mmuext_op op;
267 xpq_flush_queue();
268
269 XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
270 op.arg1.mfn = pa >> PAGE_SHIFT;
271 op.cmd = MMUEXT_UNPIN_TABLE;
272 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
273 panic("xpq_queue_unpin_table");
274 }
275
276 void
277 xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
278 {
279 struct mmuext_op op;
280 xpq_flush_queue();
281
282 XENPRINTK2(("xpq_queue_set_ldt\n"));
283 KASSERT(va == (va & ~PAGE_MASK));
284 op.cmd = MMUEXT_SET_LDT;
285 op.arg1.linear_addr = va;
286 op.arg2.nr_ents = entries;
287 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
288 panic("xpq_queue_set_ldt");
289 }
290
291 void
292 xpq_queue_tlb_flush()
293 {
294 struct mmuext_op op;
295 xpq_flush_queue();
296
297 XENPRINTK2(("xpq_queue_tlb_flush\n"));
298 op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
299 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
300 panic("xpq_queue_tlb_flush");
301 }
302
303 void
304 xpq_flush_cache()
305 {
306 struct mmuext_op op;
307 int s = splvm();
308 xpq_flush_queue();
309
310 XENPRINTK2(("xpq_queue_flush_cache\n"));
311 op.cmd = MMUEXT_FLUSH_CACHE;
312 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
313 panic("xpq_flush_cache");
314 splx(s);
315 }
316
317 void
318 xpq_queue_invlpg(vaddr_t va)
319 {
320 struct mmuext_op op;
321 xpq_flush_queue();
322
323 XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
324 op.cmd = MMUEXT_INVLPG_LOCAL;
325 op.arg1.linear_addr = (va & ~PAGE_MASK);
326 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
327 panic("xpq_queue_invlpg");
328 }
329
330 int
331 xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
332 {
333 mmu_update_t op;
334 int ok;
335 xpq_flush_queue();
336
337 op.ptr = (paddr_t)ptr;
338 op.val = val;
339 if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
340 return EFAULT;
341 return (0);
342 }
343 #else /* XEN3 */
/*
 * Queue (not issue) a switch to a new page-table base; takes effect
 * when the queue is next flushed.
 */
void
xpq_queue_pt_switch(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
	xpq_increment_idx();
}
353
/*
 * Queue a pin request for an L2 page table; takes effect when the
 * queue is next flushed.
 */
void
xpq_queue_pin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
	xpq_increment_idx();
}
363
/*
 * Queue an unpin request for a previously pinned page table; takes
 * effect when the queue is next flushed.
 */
void
xpq_queue_unpin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
	xpq_increment_idx();
}
373
/*
 * Queue an LDT change; `va' must be page-aligned, `entries' is packed
 * into the val word above the command bits.
 */
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT | (entries << MMUEXT_CMD_SHIFT);
	xpq_increment_idx();
}
384
385 void
386 xpq_queue_tlb_flush()
387 {
388
389 XENPRINTK2(("xpq_queue_tlb_flush\n"));
390 xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
391 xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
392 xpq_increment_idx();
393 }
394
395 void
396 xpq_flush_cache()
397 {
398 int s = splvm();
399
400 XENPRINTK2(("xpq_queue_flush_cache\n"));
401 xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
402 xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
403 xpq_increment_idx();
404 xpq_flush_queue();
405 splx(s);
406 }
407
/*
 * Queue a single-page TLB invalidation for the page containing `va';
 * takes effect when the queue is next flushed.
 */
void
xpq_queue_invlpg(vaddr_t va)
{

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
	xpq_increment_idx();
}
417
418 int
419 xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
420 {
421 mmu_update_t xpq_up[3];
422
423 xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
424 xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
425 xpq_up[1].ptr = (paddr_t)ptr;
426 xpq_up[1].val = val;
427 if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
428 return EFAULT;
429 return (0);
430 }
431 #endif /* XEN3 */
432
#ifdef XENDEBUG
/*
 * Dump the pending update queue to the debug console, up to four
 * entries per output line.
 */
void
xpq_debug_dump(void)	/* was K&R-style "()"; use a proper prototype */
{
	int i;
	size_t len;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		/* snprintf() bounds writes into XBUF; sprintf() did not */
		snprintf(XBUF, sizeof(XBUF), "%x %08x ",
		    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif
456