/*	$NetBSD: x86_xpmap.c,v 1.18 2010/02/12 01:55:45 jym Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.18 2010/02/12 01:55:45 jym Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define XENPRINTF(x) printf x
#define XENPRINTK(x) printk x
#define XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define XENPRINTF(x)
#define XENPRINTK(x)
#define XENPRINTK2(x)
#endif
#define PRINTF(x) printf x
#define PRINTK(x) printk x

/* on x86_64 the kernel runs in ring 3 */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

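/* Apply a batch of queued MMU updates to our own domain. */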
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
        HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

        panic("xen_failsafe_handler called!\n");
}

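/*
 * Xen requires the pages backing an LDT to be mapped read-only, so
 * clear PG_RW on them before issuing the SET_LDT operation.
 */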
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
        vaddr_t va;
        vaddr_t end;
        pt_entry_t *ptp;
        int s;

#ifdef __x86_64__
        end = base + (entries << 3);
#else
        end = base + entries * sizeof(union descriptor);
#endif

        for (va = base; va < end; va += PAGE_SIZE) {
                KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
                ptp = kvtopte(va);
                XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
                    entries, ptp));
                pmap_pte_clearbits(ptp, PG_RW);
        }
        s = splvm();
        xpq_queue_set_ldt(base, entries);
        splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif
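/*
 * MMU updates are batched in xpq_queue and submitted to the hypervisor
 * in a single HYPERVISOR_mmu_update call, amortizing the cost of the
 * hypercall. The queue is flushed when it fills up and before any
 * operation that needs the pending updates to be visible.
 */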

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

void
xpq_flush_queue(void)
{
        int i, ok;

        XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
        for (i = 0; i < xpq_idx; i++)
                XENPRINTK2(("%d: 0x%" PRIx64 " 0x%08" PRIx64 "\n", i,
                    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val));
        if (xpq_idx != 0 &&
            HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
                printf("xpq_flush_queue: %d entries\n", xpq_idx);
                for (i = 0; i < xpq_idx; i++)
                        printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
                            (uint64_t)xpq_queue[i].ptr,
                            (uint64_t)xpq_queue[i].val);
                panic("HYPERVISOR_mmu_update failed\n");
        }
        xpq_idx = 0;
}
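
/*
 * Typical usage, as a sketch (ma_of_pte and new_pte are illustrative
 * names, not variables defined here): queue one or more updates, then
 * flush before the new entries must be in effect:
 *
 *	xpq_queue_pte_update(ma_of_pte, new_pte);
 *	xpq_flush_queue();
 */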

static inline void
xpq_increment_idx(void)
{

        xpq_idx++;
        if (__predict_false(xpq_idx == XPQUEUE_SIZE))
                xpq_flush_queue();
}

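/* Queue an update of the machine-to-physical translation for page ma. */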
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
        XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
            "\n", (int64_t)ma, (int64_t)pa));
        xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
        xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

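/* Queue an update of the page table entry at machine address ptr to val. */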
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

        KASSERT((ptr & 3) == 0);
        xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
        xpq_queue[xpq_idx].val = val;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

void
xpq_queue_pt_switch(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
            (int64_t)pa, (int64_t)pa));
        op.cmd = MMUEXT_NEW_BASEPTR;
        op.arg1.mfn = pa >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pt_switch");
}

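/*
 * Pinning asks Xen to validate a page as a page-table page of the
 * given level and keep it typed that way, so it can later be installed
 * without revalidation. A pinned table must not be mapped writable
 * anywhere.
 */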
void
xpq_queue_pin_table(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
            (int64_t)pa, (int64_t)pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;

#if defined(__x86_64__)
        op.cmd = MMUEXT_PIN_L4_TABLE;
#else
        op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pin_table");
}

#ifdef PAE
static void
xpq_queue_pin_l3_table(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pin_l3_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
            (int64_t)pa, (int64_t)pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;

        op.cmd = MMUEXT_PIN_L3_TABLE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pin_l3_table");
}
#endif

void
xpq_queue_unpin_table(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
            (int64_t)pa, (int64_t)pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = MMUEXT_UNPIN_TABLE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_set_ldt\n"));
        KASSERT(va == (va & ~PAGE_MASK));
        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = va;
        op.arg2.nr_ents = entries;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_tlb_flush\n"));
        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_tlb_flush");
}

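/* Flush the CPU caches via the hypervisor (MMUEXT_FLUSH_CACHE). */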
void
xpq_flush_cache(void)
{
        struct mmuext_op op;
        int s = splvm();
        xpq_flush_queue();

        XENPRINTK2(("xpq_flush_cache\n"));
        op.cmd = MMUEXT_FLUSH_CACHE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_flush_cache");
        splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = (va & ~PAGE_MASK);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_invlpg");
}

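/*
 * Synchronously update a PTE on behalf of domain dom; used when the
 * page being modified may belong to another domain.
 */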
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
        mmu_update_t op;
        int ok;
        xpq_flush_queue();

        op.ptr = ptr;
        op.val = val;
        if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
                return EFAULT;
        return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
        int i;

        XENPRINTK2(("idx: %d\n", xpq_idx));
        for (i = 0; i < xpq_idx; i++) {
                snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
                    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            (uint64_t)xpq_queue[i].ptr,
                            (uint64_t)xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            (uint64_t)xpq_queue[i].ptr,
                            (uint64_t)xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            (uint64_t)xpq_queue[i].ptr,
                            (uint64_t)xpq_queue[i].val);
                XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
        }
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new page tables.
 * The return value is the first vaddr we can use once the Xen
 * bootstrap tables have been reclaimed.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need? */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
        int count, oldcount;
        long mapsize;
        vaddr_t bootstrap_tables, init_tables;

        xpmap_phys_to_machine_mapping =
            (unsigned long *)xen_start_info.mfn_list;
        init_tables = xen_start_info.pt_base;
        __PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

        /* Space after the Xen bootstrap tables should be free */
        bootstrap_tables = xen_start_info.pt_base +
            (xen_start_info.nr_pt_frames * PAGE_SIZE);

        /*
         * Calculate how much space we need:
         * first, everything mapped before the Xen bootstrap tables.
         */
        mapsize = init_tables - KERNTEXTOFF;
        /* after the tables we'll have:
         *  - UAREA
         *  - dummy user PGD (x86_64)
         *  - HYPERVISOR_shared_info
         *  - ISA I/O mem (if needed)
         */
        mapsize += UPAGES * NBPG;
#ifdef __x86_64__
        mapsize += NBPG;
#endif
        mapsize += NBPG;

#ifdef DOM0OPS
        if (xendomain_is_dom0()) {
                /* space for ISA I/O mem */
                mapsize += IOM_SIZE;
        }
#endif
        /* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
        count = TABLE_L2_ENTRIES;
#else
        count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

        /* now compute exactly how many L2 pages we need */
        XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
        while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
            ((long)count << L2_SHIFT) + KERNBASE) {
                count++;
        }
#ifndef __x86_64__
        /*
         * One more L2 page: we'll allocate several pages after kva_start
         * in pmap_bootstrap() before pmap_growkernel(), which have not been
         * counted here. It's not a big issue to allocate one more L2 as
         * pmap_growkernel() will be called anyway.
         */
        count++;
        nkptp[1] = count;
#endif

        /*
         * Install bootstrap pages. We may need more L2 pages here than
         * the final table will have, as the bootstrap table is installed
         * after the final table.
         */
        oldcount = count;

bootstrap_again:
        XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
        /*
         * The Xen space we'll reclaim may not be enough for our new page
         * tables; move the bootstrap tables if necessary.
         */
        if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
                bootstrap_tables = init_tables +
                    ((count + l2_4_count) * PAGE_SIZE);
        /* make sure we have enough to map the bootstrap_tables */
        if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
            ((long)oldcount << L2_SHIFT) + KERNBASE) {
                oldcount++;
                goto bootstrap_again;
        }

        /* Create temporary tables */
        xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
            xen_start_info.nr_pt_frames, oldcount, 0);

        /* Create final tables */
        xen_bootstrap_tables(bootstrap_tables, init_tables,
            oldcount + l2_4_count, count, 1);

        /* zero out free space after tables */
        memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
            (UPAGES + 1) * NBPG);
        return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new table and switch to it.
 * old_count is the number of old tables (including PGD, PDTPE and PDE);
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */

static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
        pd_entry_t *pdtpe, *pde, *pte;
        pd_entry_t *cur_pgd, *bt_pgd;
        paddr_t addr;
        vaddr_t page, avail, text_end, map_end;
        int i;
        extern char __data_start;

        __PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
            old_pgd, new_pgd, old_count, new_count));
        text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
        /*
         * size of R/W area after kernel text:
         *  xencons_interface (if present)
         *  xenstore_interface (if present)
         *  table pages (new_count + l2_4_count entries)
         * extra mappings (only when final is true):
         *  UAREA
         *  dummy user PGD (x86_64 only)/gdt page (i386 only)
         *  HYPERVISOR_shared_info
         *  ISA I/O mem (if needed)
         */
        map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
        if (final) {
                map_end += (UPAGES + 1) * NBPG;
                HYPERVISOR_shared_info = (shared_info_t *)map_end;
                map_end += NBPG;
        }
        /*
         * We always set atdevbase, as it's used by init386 to find the first
         * available VA. map_end is updated only if we are dom0, so
         * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
         * this case.
         */
        if (final)
                atdevbase = map_end;
#ifdef DOM0OPS
        if (final && xendomain_is_dom0()) {
                /* ISA I/O mem */
                map_end += IOM_SIZE;
        }
#endif /* DOM0OPS */

        __PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
            text_end, map_end));
        __PRINTK(("console 0x%lx ", xen_start_info.console.domU.mfn));
        __PRINTK(("xenstore 0x%lx\n", xen_start_info.store_mfn));

        /*
         * Create bootstrap page tables. What we need:
         * - a PGD (level 4)
         * - a PDTPE (level 3)
         * - a PDE (level 2)
         * - some PTEs (level 1)
         */

        cur_pgd = (pd_entry_t *) old_pgd;
        bt_pgd = (pd_entry_t *) new_pgd;
        memset(bt_pgd, 0, PAGE_SIZE);
        avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
        /* Install level 3 */
        pdtpe = (pd_entry_t *) avail;
        memset(pdtpe, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((u_long) pdtpe) - KERNBASE;
        bt_pgd[pl4_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

        __PRINTK(("L3 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L4[0x%x]\n",
            pdtpe, (uint64_t)addr, (uint64_t)bt_pgd[pl4_pi(KERNTEXTOFF)],
            pl4_pi(KERNTEXTOFF)));
#else
        pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
        /* Level 2 */
        pde = (pd_entry_t *) avail;
        memset(pde, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((u_long) pde) - KERNBASE;
        pdtpe[pl3_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
        __PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L3[0x%x]\n",
            pde, (int64_t)addr, (int64_t)pdtpe[pl3_pi(KERNTEXTOFF)],
            pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
        /* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
        pde = (pd_entry_t *) avail;
        memset(pde, 0, PAGE_SIZE * 5);
        avail += PAGE_SIZE * 5;
        addr = ((u_long) pde) - KERNBASE;
        /*
         * Enter the L2 pages in the L3.
         * The real kernel L2 PD will be the last one (so that
         * pde[L2_SLOT_KERN] always points to the shadow).
         */
        for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
                /*
                 * Xen doesn't want R/W mappings in L3 entries; it'll add
                 * them itself.
                 */
                pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
                __PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
                    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * i,
                    (int64_t)addr, (int64_t)pdtpe[i], i));
        }
        addr += PAGE_SIZE;
        pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
        __PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
            " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * 4,
            (int64_t)addr, (int64_t)pdtpe[3], 3));

#else /* PAE */
        pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

        /* Level 1 */
        page = KERNTEXTOFF;
        for (i = 0; i < new_count; i++) {
                vaddr_t cur_page = page;

                pte = (pd_entry_t *) avail;
                avail += PAGE_SIZE;

                memset(pte, 0, PAGE_SIZE);
                while (pl2_pi(page) == pl2_pi(cur_page)) {
                        if (page >= map_end) {
                                /* not mapped at all */
                                pte[pl1_pi(page)] = 0;
                                page += PAGE_SIZE;
                                continue;
                        }
                        pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
                        if (page == (vaddr_t)HYPERVISOR_shared_info) {
                                pte[pl1_pi(page)] = xen_start_info.shared_info;
                                __PRINTK(("HYPERVISOR_shared_info "
                                    "va 0x%lx pte 0x%" PRIx64 "\n",
                                    HYPERVISOR_shared_info, (int64_t)pte[pl1_pi(page)]));
                        }
                        if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
                            == xen_start_info.console.domU.mfn) {
                                xencons_interface = (void *)page;
                                pte[pl1_pi(page)] = xen_start_info.console.domU.mfn;
                                pte[pl1_pi(page)] <<= PAGE_SHIFT;
                                __PRINTK(("xencons_interface "
                                    "va 0x%lx pte 0x%" PRIx64 "\n",
                                    xencons_interface, (int64_t)pte[pl1_pi(page)]));
                        }
                        if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
                            == xen_start_info.store_mfn) {
                                xenstore_interface = (void *)page;
                                pte[pl1_pi(page)] = xen_start_info.store_mfn;
                                pte[pl1_pi(page)] <<= PAGE_SHIFT;
                                __PRINTK(("xenstore_interface "
                                    "va 0x%lx pte 0x%" PRIx64 "\n",
                                    xenstore_interface, (int64_t)pte[pl1_pi(page)]));
                        }
#ifdef DOM0OPS
                        if (page >= (vaddr_t)atdevbase &&
                            page < (vaddr_t)atdevbase + IOM_SIZE) {
                                pte[pl1_pi(page)] =
                                    IOM_BEGIN + (page - (vaddr_t)atdevbase);
                        }
#endif
                        pte[pl1_pi(page)] |= PG_k | PG_V;
                        if (page < text_end) {
                                /* map kernel text RO */
                                pte[pl1_pi(page)] |= 0;
                        } else if (page >= old_pgd
                            && page < old_pgd + (old_count * PAGE_SIZE)) {
                                /* map old page tables RO */
                                pte[pl1_pi(page)] |= 0;
                        } else if (page >= new_pgd &&
                            page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
                                /* map new page tables RO */
                                pte[pl1_pi(page)] |= 0;
                        } else {
                                /* map page RW */
                                pte[pl1_pi(page)] |= PG_RW;
                        }

                        if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
                            || page >= new_pgd) {
                                __PRINTK(("va 0x%lx pa 0x%lx "
                                    "entry 0x%" PRIx64 " -> L1[0x%x]\n",
                                    page, page - KERNBASE,
                                    (int64_t)pte[pl1_pi(page)], pl1_pi(page)));
                        }
                        page += PAGE_SIZE;
                }

                addr = ((u_long) pte) - KERNBASE;
                pde[pl2_pi(cur_page)] =
                    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
                __PRINTK(("L1 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
                    " -> L2[0x%x]\n", pte, (int64_t)addr,
                    (int64_t)pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
                /* Mark readonly */
                xen_bt_set_readonly((vaddr_t) pte);
        }

        /* Install recursive page tables mapping */
#ifdef PAE
        /*
         * We need a shadow page for the kernel's L2 page.
         * The real kernel L2 PD will be the last one (so that
         * pde[L2_SLOT_KERN] always points to the shadow).
         */
        memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
        pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
        pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

        /*
         * We don't enter a recursive entry from the L3 PD. Instead,
         * we enter the first 4 L2 pages, which include the kernel's L2
         * shadow. But we have to enter the shadow after switching
         * %cr3, or Xen will refcount some PTEs with the wrong type.
         */
        addr = (u_long)pde - KERNBASE;
        for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
                pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
                __PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
                    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i, (long)addr,
                    (int64_t)pde[PDIR_SLOT_PTE + i]));
        }
#if 0
        addr += PAGE_SIZE; /* point to shadow L2 */
        pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
        __PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
            (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
            (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
        /* Mark tables RO, and pin the kernel's shadow as L2 */
        addr = (u_long)pde - KERNBASE;
        for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
                xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
                if (i == 2 || i == 3)
                        continue;
#if 0
                __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
                xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
        }
        if (final) {
                addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
                __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
                xpq_queue_pin_table(xpmap_ptom_masked(addr));
        }
#if 0
        addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
        __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
        xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
        /* recursive entry in higher-level PD */
        bt_pgd[PDIR_SLOT_PTE] =
            xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
        __PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
            " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
            (int64_t)bt_pgd[PDIR_SLOT_PTE]));
        /* Mark tables RO */
        xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
        xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
        xen_bt_set_readonly(new_pgd);
#endif
        /* Pin the PGD */
        __PRINTK(("pin PGD\n"));
#ifdef PAE
        xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
        xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
#ifdef __i386__
        /* Save phys. addr of PDP, for libkvm. */
        PDPpaddr = (long)pde;
#ifdef PAE
        /* also save the address of the L3 page */
        pmap_l3pd = pdtpe;
        pmap_l3paddr = (new_pgd - KERNBASE);
#endif /* PAE */
#endif /* i386 */
        /* Switch to new tables */
        __PRINTK(("switch to PGD\n"));
        xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
        __PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%" PRIx64 "\n",
            (int64_t)bt_pgd[PDIR_SLOT_PTE]));
#ifdef PAE
        if (final) {
                /* now enter the kernel's PTE mappings */
                addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
                xpq_queue_pte_update(
                    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
                    xpmap_ptom_masked(addr) | PG_k | PG_V);
                xpq_flush_queue();
        }
#endif

        /* Now we can safely reclaim the space taken by the old tables */

        __PRINTK(("unpin old PGD\n"));
        /* Unpin old PGD */
        xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
        /* Mark old tables RW */
        page = old_pgd;
        addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
        addr = xpmap_mtop(addr);
        pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
        pte += pl1_pi(page);
        __PRINTK(("*pde 0x%" PRIx64 " addr 0x%" PRIx64 " pte 0x%lx\n",
            (int64_t)pde[pl2_pi(page)], (int64_t)addr, (long)pte));
        while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
                addr = xpmap_ptom(((u_long) pte) - KERNBASE);
                XENPRINTK(("addr 0x%" PRIx64 " pte 0x%lx *pte 0x%" PRIx64 "\n",
                    (int64_t)addr, (long)pte, (int64_t)*pte));
                xpq_queue_pte_update(addr, *pte | PG_RW);
                page += PAGE_SIZE;
                /*
                 * Our PTEs are contiguous
                 * so it's safe to just "++" here.
                 */
                pte++;
        }
        xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
        pt_entry_t entry;

        entry = xpmap_ptom_masked(page - KERNBASE);
        entry |= PG_k | PG_V;

        HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
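/*
 * On x86_64 PV guests, Xen keeps separate kernel and user top-level
 * page tables; this installs the user one (MMUEXT_NEW_USER_BASEPTR).
 */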
void
xen_set_user_pgd(paddr_t page)
{
        struct mmuext_op op;
        int s = splvm();

        xpq_flush_queue();
        op.cmd = MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_set_user_pgd: failed to install new user page"
                        " directory %lx", page);
        splx(s);
}
#endif /* __x86_64__ */