/*	$NetBSD: x86_xpmap.c,v 1.12.4.4 2009/07/23 23:31:37 jym Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.12.4.4 2009/07/23 23:31:37 jym Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define XENPRINTF(x) printf x
#define XENPRINTK(x) printk x
#define XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define XENPRINTF(x)
#define XENPRINTK(x)
#define XENPRINTK2(x)
#endif
#define PRINTF(x) printf x
#define PRINTK(x) printk x
/* On x86_64 the kernel runs in ring 3 */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

/*
 * The domU should avoid manipulating MFNs while it is suspending or
 * migrating, as they could be invalid once the domU resumes operation.
 *
 * We use a read/write lock for that: when a thread is expected to
 * manipulate MFNs, it should first acquire a reader lock, then proceed
 * to manipulate the MFNs. Once it has finished, the reader lock is
 * released.
 *
 * The thread responsible for the domU suspension will acquire an
 * exclusive (writer) lock.
 *
 * XXX JYM the locking will need revisiting - rwlock(9) is currently
 * inadequate
 */
static krwlock_t xen_ptom_lock;

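/*
 * A minimal usage sketch (hypothetical caller; xpmap_ptom() is the
 * PFN-to-MFN translation used elsewhere in this file):
 *
 *	xen_acquire_reader_ptom_lock();
 *	ma = xpmap_ptom(pa);	(the MFN stays valid while the lock is held)
 *	xen_release_ptom_lock();
 */
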
void
xen_init_ptom_lock(void)
{
	rw_init(&xen_ptom_lock);
}

void
xen_release_ptom_lock(void)
{
	/* rw_exit(&xen_ptom_lock); */
}

void
xen_acquire_reader_ptom_lock(void)
{
	/* rw_enter(&xen_ptom_lock, RW_READER); */
}

void
xen_acquire_writer_ptom_lock(void)
{
	/* rw_enter(&xen_ptom_lock, RW_WRITER); */
}

void xen_failsafe_handler(void);

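/*
 * Xen 3 added a target-domain argument to HYPERVISOR_mmu_update();
 * this wrapper hides the ABI difference so callers always operate on
 * the current domain.
 */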
#ifdef XEN3
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
#else
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count))
#endif

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


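/*
 * Xen refuses to activate an LDT whose backing pages are mapped
 * writable, so clear PG_RW on every page covering the LDT before
 * queueing the MMUEXT_SET_LDT operation.
 */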
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

#ifdef XEN3
	xen_acquire_reader_ptom_lock();
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
		    entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	xpq_flush_queue();

#ifdef XEN3
	xen_release_ptom_lock();
#endif

	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

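/*
 * MMU updates are batched in xpq_queue and handed to the hypervisor
 * in a single HYPERVISOR_mmu_update call (see xpq_flush_queue()),
 * amortizing the hypercall cost over up to XPQUEUE_SIZE entries.
 */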
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

void
xpq_flush_queue(void)
{
	int i, ok;

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: %p %08" PRIx64 "\n", i,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val));
	if (xpq_idx != 0 &&
	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
		printf("xpq_flush_queue: %d entries\n", xpq_idx);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed\n");
	}
	xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

#ifdef XEN3
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

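/*
 * Pin a page as a top-level page table. The pin level has to match
 * the role the page plays: L4 on amd64, L2 on i386 (PAE L3 pins are
 * issued through xpq_queue_pin_l3_table() below).
 */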
void
xpq_queue_pin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

#if defined(__x86_64__)
	op.cmd = MMUEXT_PIN_L4_TABLE;
#else
	op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

#ifdef PAE
static void
xpq_queue_pin_l3_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l3_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

	op.cmd = MMUEXT_PIN_L3_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_l3_table");
}
#endif

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm();
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_flush_cache");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

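/*
 * Perform a single, unbatched PTE update on behalf of another domain
 * (dom). The queue is flushed first so the update is ordered after
 * any pending local updates.
 */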
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}
#else /* XEN3 */
void
xpq_queue_pt_switch(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
	xpq_increment_idx();
}

void
xpq_queue_pin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
	xpq_increment_idx();
}

void
xpq_queue_unpin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
	xpq_increment_idx();
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT | (entries << MMUEXT_CMD_SHIFT);
	xpq_increment_idx();
}

void
xpq_queue_tlb_flush(void)
{

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
	xpq_increment_idx();
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
	xpq_increment_idx();
	xpq_flush_queue();
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
	xpq_increment_idx();
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t xpq_up[3];

	xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
	xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
	xpq_up[1].ptr = ptr;
	xpq_up[1].val = val;
	if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
		return EFAULT;
	return (0);
}
#endif /* XEN3 */

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif
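/*
 * The doubling accounts for the kernel image being reachable through
 * a second set of L2 slots at KERNBASE (when L2_SLOT_KERNBASE > 0)
 * in addition to the KERNTEXTOFF mapping.
 */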

/*
 * Construct and switch to new pagetables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
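/*
 * PAE accounting: 4 L2 pages + 1 shadow for the kernel's L2 + the L3
 * page itself = 6 pages. Without PAE, one page per level above the
 * PTEs is enough, i.e. PTP_LEVELS - 1.
 */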

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
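	/*
	 * The loop converges: each extra L2 page extends the mappable VA
	 * range by NBPD_L2 while adding only PAGE_SIZE to what must be
	 * mapped.
	 */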
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * install bootstrap pages. We may need more L2 pages here than
	 * the final table will have, as the bootstrap tables are installed
	 * after the final table.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);
	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new table and switch to it
 * old_count is # of old tables (including PGD, PDTPE and PDE)
 * new_count is # of new tables (PTE only)
 * we assume areas don't overlap
 */


static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console 0x%lx ", xen_start_info.console.domU.mfn));
	__PRINTK(("xenstore 0x%lx\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables
	 * What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L4[0x%x]\n",
	    pdtpe, (uint64_t)addr, (uint64_t)bt_pgd[pl4_pi(KERNTEXTOFF)],
	    pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L3[0x%x]\n",
	    pde, (int64_t)addr, (int64_t)pdtpe[pl3_pi(KERNTEXTOFF)],
	    pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries; it will
		 * add them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * i,
		    (int64_t)addr, (int64_t)pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
	    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * 4,
	    (int64_t)addr, (int64_t)pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    HYPERVISOR_shared_info, (int64_t)pte[pl1_pi(page)]));
			}
#ifdef XEN3
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console.domU.mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xencons_interface, (int64_t)pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xenstore_interface, (int64_t)pte[pl1_pi(page)]));
			}
#endif /* XEN3 */
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va 0x%lx pa 0x%lx "
				    "entry 0x%" PRIx64 " -> L1[0x%x]\n",
				    page, page - KERNBASE,
				    (int64_t)pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L2[0x%x]\n", pte, (int64_t)addr,
		    (int64_t)pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i, (long)addr,
		    (int64_t)pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
	    " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD\n"));
#ifdef PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
#ifdef __i386__
	/* Save phys. addr of PDP, for libkvm. */
	PDPpaddr = (long)pde;
#ifdef PAE
	/* also save the address of the L3 page */
	pmap_l3pd = pdtpe;
	pmap_l3paddr = (new_pgd - KERNBASE);
#endif /* PAE */
#endif /* i386 */
	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%" PRIx64 "\n",
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
#ifdef PAE
	if (final) {
		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif


	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde 0x%" PRIx64 " addr 0x%" PRIx64 " pte 0x%lx\n",
	    (int64_t)pde[pl2_pi(page)], (int64_t)addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr 0x%" PRIx64 " pte 0x%lx *pte 0x%" PRIx64 "\n",
		    (int64_t)addr, (long)pte, (int64_t)*pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our ptes are contiguous
		 * so it's safe to just "++" here
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	xen_acquire_reader_ptom_lock();

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);

	xen_release_ptom_lock();
}

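/*
 * Install a new user-mode page directory. Under Xen/amd64 the kernel
 * runs in ring 3 (see PG_k above) and user mode uses a separate L4
 * table, installed with MMUEXT_NEW_USER_BASEPTR.
 */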
#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;

	xen_acquire_reader_ptom_lock();

	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %lx", page);

	xen_release_ptom_lock();

	splx(s);
}
#endif /* __x86_64__ */