x86_xpmap.c revision 1.35

1 1.35 cherry /* $NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $ */
2 1.2 bouyer
3 1.2 bouyer /*
4 1.2 bouyer * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
5 1.2 bouyer *
6 1.2 bouyer * Permission to use, copy, modify, and distribute this software for any
7 1.2 bouyer * purpose with or without fee is hereby granted, provided that the above
8 1.2 bouyer * copyright notice and this permission notice appear in all copies.
9 1.2 bouyer *
10 1.2 bouyer * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 1.2 bouyer * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 1.2 bouyer * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 1.2 bouyer * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 1.2 bouyer * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 1.2 bouyer * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 1.2 bouyer * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 1.2 bouyer */
18 1.2 bouyer
19 1.2 bouyer /*
20 1.2 bouyer * Copyright (c) 2006, 2007 Manuel Bouyer.
21 1.2 bouyer *
22 1.2 bouyer * Redistribution and use in source and binary forms, with or without
23 1.2 bouyer * modification, are permitted provided that the following conditions
24 1.2 bouyer * are met:
25 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
26 1.2 bouyer * notice, this list of conditions and the following disclaimer.
27 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
28 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
29 1.2 bouyer * documentation and/or other materials provided with the distribution.
30 1.2 bouyer *
31 1.2 bouyer * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
32 1.2 bouyer * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33 1.2 bouyer * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
34 1.2 bouyer * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
35 1.2 bouyer * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36 1.2 bouyer * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
37 1.2 bouyer * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
38 1.2 bouyer * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 1.2 bouyer * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
40 1.2 bouyer * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 1.2 bouyer *
42 1.2 bouyer */
43 1.2 bouyer
44 1.2 bouyer /*
45 1.2 bouyer *
46 1.2 bouyer * Copyright (c) 2004 Christian Limpach.
47 1.2 bouyer * All rights reserved.
48 1.2 bouyer *
49 1.2 bouyer * Redistribution and use in source and binary forms, with or without
50 1.2 bouyer * modification, are permitted provided that the following conditions
51 1.2 bouyer * are met:
52 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
53 1.2 bouyer * notice, this list of conditions and the following disclaimer.
54 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
55 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
56 1.2 bouyer * documentation and/or other materials provided with the distribution.
57 1.2 bouyer *
58 1.2 bouyer * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
59 1.2 bouyer * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
60 1.2 bouyer * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
61 1.2 bouyer * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
62 1.2 bouyer * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
63 1.2 bouyer * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
64 1.2 bouyer * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
65 1.2 bouyer * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
66 1.2 bouyer * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
67 1.2 bouyer * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 1.2 bouyer */
69 1.2 bouyer
70 1.2 bouyer
71 1.2 bouyer #include <sys/cdefs.h>
72 1.35 cherry __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $");
73 1.2 bouyer
74 1.2 bouyer #include "opt_xen.h"
75 1.4 bouyer #include "opt_ddb.h"
76 1.4 bouyer #include "ksyms.h"
77 1.2 bouyer
78 1.2 bouyer #include <sys/param.h>
79 1.2 bouyer #include <sys/systm.h>
80 1.29 cherry #include <sys/simplelock.h>
81 1.2 bouyer
82 1.2 bouyer #include <uvm/uvm.h>
83 1.2 bouyer
84 1.2 bouyer #include <machine/pmap.h>
85 1.2 bouyer #include <machine/gdt.h>
86 1.2 bouyer #include <xen/xenfunc.h>
87 1.2 bouyer
88 1.2 bouyer #include <dev/isa/isareg.h>
89 1.2 bouyer #include <machine/isa_machdep.h>
90 1.2 bouyer
91 1.2 bouyer #undef XENDEBUG
92 1.2 bouyer /* #define XENDEBUG_SYNC */
93 1.2 bouyer /* #define XENDEBUG_LOW */
94 1.2 bouyer
95 1.2 bouyer #ifdef XENDEBUG
96 1.2 bouyer #define XENPRINTF(x) printf x
97 1.2 bouyer #define XENPRINTK(x) printk x
98 1.2 bouyer #define XENPRINTK2(x) /* printk x */
99 1.2 bouyer
100 1.2 bouyer static char XBUF[256];
101 1.2 bouyer #else
102 1.2 bouyer #define XENPRINTF(x)
103 1.2 bouyer #define XENPRINTK(x)
104 1.2 bouyer #define XENPRINTK2(x)
105 1.2 bouyer #endif
106 1.2 bouyer #define PRINTF(x) printf x
107 1.2 bouyer #define PRINTK(x) printk x
108 1.2 bouyer
109 1.4 bouyer /* On x86_64, the kernel runs in ring 3. */
110 1.4 bouyer #ifdef __x86_64__
111 1.4 bouyer #define PG_k PG_u
112 1.4 bouyer #else
113 1.4 bouyer #define PG_k 0
114 1.4 bouyer #endif
115 1.4 bouyer
116 1.2 bouyer volatile shared_info_t *HYPERVISOR_shared_info;
117 1.11 jym /* Xen requires the start_info struct to be page aligned */
118 1.11 jym union start_info_union start_info_union __aligned(PAGE_SIZE);
119 1.6 bouyer unsigned long *xpmap_phys_to_machine_mapping;
120 1.2 bouyer
121 1.2 bouyer void xen_failsafe_handler(void);
122 1.2 bouyer
123 1.2 bouyer #define HYPERVISOR_mmu_update_self(req, count, success_count) \
124 1.2 bouyer HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
125 1.2 bouyer
126 1.2 bouyer void
127 1.2 bouyer xen_failsafe_handler(void)
128 1.2 bouyer {
129 1.2 bouyer
130 1.2 bouyer panic("xen_failsafe_handler called!\n");
131 1.2 bouyer }
132 1.2 bouyer
133 1.2 bouyer
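/*
 * Install a new LDT.  Xen requires the pages backing the LDT to be
 * mapped read-only, so clear PG_RW on them before queueing the
 * MMUEXT_SET_LDT operation.
 */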
134 1.2 bouyer void
135 1.2 bouyer xen_set_ldt(vaddr_t base, uint32_t entries)
136 1.2 bouyer {
137 1.2 bouyer vaddr_t va;
138 1.2 bouyer vaddr_t end;
139 1.4 bouyer pt_entry_t *ptp;
140 1.2 bouyer int s;
141 1.2 bouyer
142 1.2 bouyer #ifdef __x86_64__
143 1.2 bouyer end = base + (entries << 3);
144 1.2 bouyer #else
145 1.2 bouyer end = base + entries * sizeof(union descriptor);
146 1.2 bouyer #endif
147 1.2 bouyer
148 1.2 bouyer for (va = base; va < end; va += PAGE_SIZE) {
149 1.2 bouyer KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
150 1.2 bouyer ptp = kvtopte(va);
151 1.19 jym XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
152 1.19 jym base, entries, ptp));
153 1.4 bouyer pmap_pte_clearbits(ptp, PG_RW);
154 1.2 bouyer }
155 1.2 bouyer s = splvm();
156 1.2 bouyer xpq_queue_set_ldt(base, entries);
157 1.2 bouyer splx(s);
158 1.2 bouyer }
159 1.2 bouyer
160 1.2 bouyer #ifdef XENDEBUG
161 1.2 bouyer void xpq_debug_dump(void);
162 1.2 bouyer #endif
163 1.2 bouyer
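/*
 * MMU updates are batched in a per-CPU queue and handed to the hypervisor
 * in a single HYPERVISOR_mmu_update() call, either when the queue fills
 * up or when xpq_flush_queue() is called explicitly.
 */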
164 1.2 bouyer #define XPQUEUE_SIZE 2048
165 1.35 cherry static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
166 1.35 cherry static int xpq_idx_array[MAXCPUS];
167 1.30 cherry
168 1.35 cherry extern struct cpu_info * (*xpq_cpu)(void);
169 1.2 bouyer
170 1.2 bouyer void
171 1.35 cherry xpq_flush_queue(void)
172 1.30 cherry {
173 1.35 cherry int i, ok = 0, ret;
174 1.30 cherry
175 1.35 cherry mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
176 1.35 cherry int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
177 1.2 bouyer
178 1.2 bouyer XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
179 1.2 bouyer for (i = 0; i < xpq_idx; i++)
180 1.19 jym XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
181 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val));
182 1.23 jym
183 1.35 cherry retry:
184 1.23 jym ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
185 1.23 jym
186 1.23 jym if (xpq_idx != 0 && ret < 0) {
187 1.22 jym printf("xpq_flush_queue: %d entries (%d successful)\n",
188 1.22 jym xpq_idx, ok);
189 1.35 cherry
190 1.35 cherry if (ok != 0) {
191 1.35 cherry xpq_queue += ok;
192 1.35 cherry xpq_idx -= ok;
193 1.35 cherry ok = 0;
194 1.35 cherry goto retry;
195 1.35 cherry }
196 1.35 cherry
197 1.2 bouyer for (i = 0; i < xpq_idx; i++)
198 1.3 bouyer printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
199 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val);
200 1.23 jym panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
201 1.2 bouyer }
202 1.35 cherry xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
203 1.2 bouyer }
204 1.2 bouyer
205 1.2 bouyer static inline void
206 1.2 bouyer xpq_increment_idx(void)
207 1.2 bouyer {
208 1.2 bouyer
209 1.35 cherry if (__predict_false(++xpq_idx_array[xpq_cpu()->ci_cpuid] == XPQUEUE_SIZE))
210 1.2 bouyer xpq_flush_queue();
211 1.2 bouyer }
212 1.2 bouyer
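/* Queue an update of the machine->physical translation for page ma. */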
213 1.2 bouyer void
214 1.2 bouyer xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
215 1.2 bouyer {
216 1.35 cherry
217 1.35 cherry mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
218 1.35 cherry int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
219 1.35 cherry
220 1.6 bouyer XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
221 1.6 bouyer "\n", (int64_t)ma, (int64_t)pa));
222 1.35 cherry
223 1.2 bouyer xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
224 1.2 bouyer xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
225 1.2 bouyer xpq_increment_idx();
226 1.2 bouyer #ifdef XENDEBUG_SYNC
227 1.2 bouyer xpq_flush_queue();
228 1.2 bouyer #endif
229 1.2 bouyer }
230 1.2 bouyer
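/* Queue a PTE update: write val into the PTE at machine address ptr. */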
231 1.2 bouyer void
232 1.6 bouyer xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
233 1.2 bouyer {
234 1.2 bouyer
235 1.35 cherry mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
236 1.35 cherry int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
237 1.35 cherry
238 1.6 bouyer KASSERT((ptr & 3) == 0);
239 1.2 bouyer xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
240 1.2 bouyer xpq_queue[xpq_idx].val = val;
241 1.2 bouyer xpq_increment_idx();
242 1.2 bouyer #ifdef XENDEBUG_SYNC
243 1.2 bouyer xpq_flush_queue();
244 1.2 bouyer #endif
245 1.2 bouyer }
246 1.2 bouyer
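/* Switch to the new top-level page table at pa (synchronous mmuext op). */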
247 1.2 bouyer void
248 1.2 bouyer xpq_queue_pt_switch(paddr_t pa)
249 1.2 bouyer {
250 1.2 bouyer struct mmuext_op op;
251 1.2 bouyer xpq_flush_queue();
252 1.2 bouyer
253 1.6 bouyer XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
254 1.6 bouyer (int64_t)pa, (int64_t)pa));
255 1.2 bouyer op.cmd = MMUEXT_NEW_BASEPTR;
256 1.2 bouyer op.arg1.mfn = pa >> PAGE_SHIFT;
257 1.2 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
258 1.2 bouyer panic("xpq_queue_pt_switch");
259 1.2 bouyer }
260 1.2 bouyer
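/*
 * Pin the page table page at pa at level lvl; lvl is the appropriate
 * MMUEXT_PIN_L*_TABLE command.
 */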
261 1.2 bouyer void
262 1.24 jym xpq_queue_pin_table(paddr_t pa, int lvl)
263 1.2 bouyer {
264 1.2 bouyer struct mmuext_op op;
265 1.29 cherry
266 1.2 bouyer xpq_flush_queue();
267 1.2 bouyer
268 1.24 jym XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
269 1.24 jym lvl + 1, pa));
270 1.2 bouyer
271 1.6 bouyer op.arg1.mfn = pa >> PAGE_SHIFT;
272 1.24 jym op.cmd = lvl;
273 1.6 bouyer
274 1.6 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
275 1.6 bouyer panic("xpq_queue_pin_table");
276 1.6 bouyer }
277 1.6 bouyer
278 1.2 bouyer void
279 1.2 bouyer xpq_queue_unpin_table(paddr_t pa)
280 1.2 bouyer {
281 1.2 bouyer struct mmuext_op op;
282 1.29 cherry
283 1.2 bouyer xpq_flush_queue();
284 1.2 bouyer
285 1.24 jym XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
286 1.2 bouyer op.arg1.mfn = pa >> PAGE_SHIFT;
287 1.2 bouyer op.cmd = MMUEXT_UNPIN_TABLE;
288 1.2 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
289 1.2 bouyer panic("xpq_queue_unpin_table");
290 1.2 bouyer }
291 1.2 bouyer
292 1.2 bouyer void
293 1.2 bouyer xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
294 1.2 bouyer {
295 1.2 bouyer struct mmuext_op op;
296 1.29 cherry
297 1.2 bouyer xpq_flush_queue();
298 1.2 bouyer
299 1.2 bouyer XENPRINTK2(("xpq_queue_set_ldt\n"));
300 1.2 bouyer KASSERT(va == (va & ~PAGE_MASK));
301 1.2 bouyer op.cmd = MMUEXT_SET_LDT;
302 1.2 bouyer op.arg1.linear_addr = va;
303 1.2 bouyer op.arg2.nr_ents = entries;
304 1.2 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
305 1.2 bouyer panic("xpq_queue_set_ldt");
306 1.2 bouyer }
307 1.2 bouyer
308 1.2 bouyer void
309 1.8 cegger xpq_queue_tlb_flush(void)
310 1.2 bouyer {
311 1.2 bouyer struct mmuext_op op;
312 1.29 cherry
313 1.2 bouyer xpq_flush_queue();
314 1.2 bouyer
315 1.2 bouyer XENPRINTK2(("xpq_queue_tlb_flush\n"));
316 1.2 bouyer op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
317 1.2 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
318 1.2 bouyer panic("xpq_queue_tlb_flush");
319 1.2 bouyer }
320 1.2 bouyer
321 1.2 bouyer void
322 1.8 cegger xpq_flush_cache(void)
323 1.2 bouyer {
324 1.2 bouyer struct mmuext_op op;
325 1.29 cherry int s = splvm(), err;
326 1.29 cherry
327 1.2 bouyer xpq_flush_queue();
328 1.2 bouyer
329 1.2 bouyer XENPRINTK2(("xpq_queue_flush_cache\n"));
330 1.2 bouyer op.cmd = MMUEXT_FLUSH_CACHE;
331 1.33 jym if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
332 1.33 jym panic("xpq_flush_cache, err %d", err);
333 1.33 jym }
334 1.29 cherry splx(s); /* XXX: removeme */
335 1.2 bouyer }
336 1.2 bouyer
337 1.2 bouyer void
338 1.2 bouyer xpq_queue_invlpg(vaddr_t va)
339 1.2 bouyer {
340 1.2 bouyer struct mmuext_op op;
341 1.2 bouyer xpq_flush_queue();
342 1.2 bouyer
343 1.19 jym XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
344 1.2 bouyer op.cmd = MMUEXT_INVLPG_LOCAL;
345 1.2 bouyer op.arg1.linear_addr = (va & ~PAGE_MASK);
346 1.2 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
347 1.2 bouyer panic("xpq_queue_invlpg");
348 1.2 bouyer }
349 1.2 bouyer
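/*
 * Invalidate va in the TLB of the vcpus in cpumask.
 * This is a synchronous call.
 */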
350 1.29 cherry void
351 1.29 cherry xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
352 1.29 cherry {
353 1.29 cherry mmuext_op_t op;
354 1.29 cherry
355 1.29 cherry /* Flush pending page updates */
356 1.29 cherry xpq_flush_queue();
357 1.29 cherry
358 1.29 cherry op.cmd = MMUEXT_INVLPG_MULTI;
359 1.29 cherry op.arg1.linear_addr = va;
360 1.29 cherry op.arg2.vcpumask = &cpumask;
361 1.29 cherry
362 1.29 cherry if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
363 1.29 cherry 		panic("xen_mcast_invlpg");
364 1.29 cherry }
365 1.29 cherry
366 1.29 cherry return;
367 1.29 cherry }
368 1.29 cherry
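/*
 * Invalidate va in the TLB of all vcpus.
 * This is a synchronous call.
 */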
369 1.29 cherry void
370 1.29 cherry xen_bcast_invlpg(vaddr_t va)
371 1.29 cherry {
372 1.29 cherry mmuext_op_t op;
373 1.29 cherry
374 1.29 cherry /* Flush pending page updates */
375 1.29 cherry xpq_flush_queue();
376 1.29 cherry
377 1.29 cherry op.cmd = MMUEXT_INVLPG_ALL;
378 1.29 cherry op.arg1.linear_addr = va;
379 1.29 cherry
380 1.29 cherry if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
381 1.29 cherry 		panic("xen_bcast_invlpg");
382 1.29 cherry }
383 1.29 cherry
384 1.29 cherry return;
385 1.29 cherry }
386 1.29 cherry
387 1.29 cherry /* This is a synchronous call. */
388 1.29 cherry void
389 1.29 cherry xen_mcast_tlbflush(uint32_t cpumask)
390 1.29 cherry {
391 1.29 cherry mmuext_op_t op;
392 1.29 cherry
393 1.29 cherry /* Flush pending page updates */
394 1.29 cherry xpq_flush_queue();
395 1.29 cherry
396 1.29 cherry op.cmd = MMUEXT_TLB_FLUSH_MULTI;
397 1.29 cherry op.arg2.vcpumask = &cpumask;
398 1.29 cherry
399 1.29 cherry if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
400 1.29 cherry 		panic("xen_mcast_tlbflush");
401 1.29 cherry }
402 1.29 cherry
403 1.29 cherry return;
404 1.29 cherry }
405 1.29 cherry
406 1.29 cherry /* This is a synchronous call. */
407 1.29 cherry void
408 1.29 cherry xen_bcast_tlbflush(void)
409 1.29 cherry {
410 1.29 cherry mmuext_op_t op;
411 1.29 cherry
412 1.29 cherry /* Flush pending page updates */
413 1.29 cherry xpq_flush_queue();
414 1.29 cherry
415 1.29 cherry op.cmd = MMUEXT_TLB_FLUSH_ALL;
416 1.29 cherry
417 1.29 cherry if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
418 1.29 cherry 		panic("xen_bcast_tlbflush");
419 1.29 cherry }
420 1.29 cherry
421 1.29 cherry return;
422 1.29 cherry }
423 1.29 cherry
424 1.29 cherry /* This is a synchronous call. */
425 1.29 cherry void
426 1.29 cherry xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
427 1.29 cherry {
428 1.29 cherry KASSERT(eva > sva);
429 1.29 cherry
430 1.29 cherry /* Flush pending page updates */
431 1.29 cherry xpq_flush_queue();
432 1.29 cherry
433 1.29 cherry 	/* Round addresses down to a page boundary */
434 1.29 cherry sva &= ~PAGE_MASK;
435 1.29 cherry eva &= ~PAGE_MASK;
436 1.29 cherry
437 1.29 cherry for ( ; sva <= eva; sva += PAGE_SIZE) {
438 1.29 cherry xen_mcast_invlpg(sva, cpumask);
439 1.29 cherry }
440 1.29 cherry
441 1.29 cherry return;
442 1.29 cherry }
443 1.29 cherry
444 1.29 cherry /* This is a synchronous call. */
445 1.29 cherry void
446 1.29 cherry xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
447 1.29 cherry {
448 1.29 cherry KASSERT(eva > sva);
449 1.29 cherry
450 1.29 cherry /* Flush pending page updates */
451 1.29 cherry xpq_flush_queue();
452 1.29 cherry
453 1.29 cherry 	/* Round addresses down to a page boundary */
454 1.29 cherry sva &= ~PAGE_MASK;
455 1.29 cherry eva &= ~PAGE_MASK;
456 1.29 cherry
457 1.29 cherry for ( ; sva <= eva; sva += PAGE_SIZE) {
458 1.29 cherry xen_bcast_invlpg(sva);
459 1.29 cherry }
460 1.29 cherry
461 1.29 cherry return;
462 1.29 cherry }
463 1.29 cherry
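/* Synchronously update a PTE on behalf of another domain (dom). */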
464 1.2 bouyer int
465 1.6 bouyer xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
466 1.2 bouyer {
467 1.2 bouyer mmu_update_t op;
468 1.2 bouyer int ok;
469 1.29 cherry
470 1.2 bouyer xpq_flush_queue();
471 1.2 bouyer
472 1.6 bouyer op.ptr = ptr;
473 1.2 bouyer op.val = val;
474 1.2 bouyer if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
475 1.2 bouyer return EFAULT;
476 1.2 bouyer return (0);
477 1.2 bouyer }
478 1.2 bouyer
479 1.2 bouyer #ifdef XENDEBUG
480 1.2 bouyer void
481 1.8 cegger xpq_debug_dump(void)
482 1.2 bouyer {
483 1.2 bouyer int i;
484 1.2 bouyer
485 1.35 cherry mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
486 1.35 cherry int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
487 1.35 cherry
488 1.2 bouyer XENPRINTK2(("idx: %d\n", xpq_idx));
489 1.2 bouyer for (i = 0; i < xpq_idx; i++) {
490 1.13 cegger snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
491 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val);
492 1.2 bouyer if (++i < xpq_idx)
493 1.13 cegger snprintf(XBUF + strlen(XBUF),
494 1.13 cegger sizeof(XBUF) - strlen(XBUF),
495 1.13 cegger "%" PRIx64 " %08" PRIx64,
496 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val);
497 1.2 bouyer if (++i < xpq_idx)
498 1.13 cegger snprintf(XBUF + strlen(XBUF),
499 1.13 cegger sizeof(XBUF) - strlen(XBUF),
500 1.13 cegger "%" PRIx64 " %08" PRIx64,
501 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val);
502 1.2 bouyer if (++i < xpq_idx)
503 1.13 cegger snprintf(XBUF + strlen(XBUF),
504 1.13 cegger sizeof(XBUF) - strlen(XBUF),
505 1.13 cegger "%" PRIx64 " %08" PRIx64,
506 1.19 jym xpq_queue[i].ptr, xpq_queue[i].val);
507 1.2 bouyer XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
508 1.2 bouyer }
509 1.2 bouyer }
510 1.2 bouyer #endif
511 1.2 bouyer
512 1.2 bouyer
513 1.2 bouyer extern volatile struct xencons_interface *xencons_interface; /* XXX */
514 1.2 bouyer extern struct xenstore_domain_interface *xenstore_interface; /* XXX */
515 1.2 bouyer
516 1.2 bouyer static void xen_bt_set_readonly (vaddr_t);
517 1.2 bouyer static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);
518 1.2 bouyer
519 1.2 bouyer /* How many PDEs do we need? */
520 1.2 bouyer #if L2_SLOT_KERNBASE > 0
521 1.2 bouyer #define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
522 1.2 bouyer #else
523 1.2 bouyer #define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
524 1.2 bouyer #endif
525 1.2 bouyer
526 1.2 bouyer /*
527 1.2 bouyer * Construct and switch to new pagetables
528 1.2 bouyer * first_avail is the first vaddr we can use after
529 1.2 bouyer * we get rid of Xen pagetables
530 1.2 bouyer */
531 1.2 bouyer
532 1.2 bouyer vaddr_t xen_pmap_bootstrap (void);
533 1.2 bouyer
534 1.2 bouyer /*
535 1.2 bouyer * Function to get rid of Xen bootstrap tables
536 1.2 bouyer */
537 1.2 bouyer
538 1.6 bouyer /* How many PDPs do we need: */
539 1.6 bouyer #ifdef PAE
540 1.6 bouyer /*
541 1.6 bouyer  * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
542 1.6 bouyer * all of them mapped by the L3 page. We also need a shadow page
543 1.6 bouyer * for L3[3].
544 1.6 bouyer */
545 1.6 bouyer static const int l2_4_count = 6;
546 1.6 bouyer #else
547 1.6 bouyer static const int l2_4_count = PTP_LEVELS - 1;
548 1.6 bouyer #endif
549 1.6 bouyer
550 1.2 bouyer vaddr_t
551 1.8 cegger xen_pmap_bootstrap(void)
552 1.2 bouyer {
553 1.4 bouyer int count, oldcount;
554 1.4 bouyer long mapsize;
555 1.2 bouyer vaddr_t bootstrap_tables, init_tables;
556 1.2 bouyer
557 1.35 cherry memset(xpq_idx_array, 0, sizeof xpq_idx_array);
558 1.35 cherry
559 1.6 bouyer xpmap_phys_to_machine_mapping =
560 1.6 bouyer (unsigned long *)xen_start_info.mfn_list;
561 1.2 bouyer init_tables = xen_start_info.pt_base;
562 1.2 bouyer __PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));
563 1.2 bouyer
564 1.2 bouyer 	/* Space after Xen bootstrap tables should be free */
565 1.2 bouyer bootstrap_tables = xen_start_info.pt_base +
566 1.2 bouyer (xen_start_info.nr_pt_frames * PAGE_SIZE);
567 1.2 bouyer
568 1.4 bouyer /*
569 1.4 bouyer 	 * Calculate how much space we need.
570 1.4 bouyer 	 * First, everything mapped before the Xen bootstrap tables.
571 1.4 bouyer */
572 1.4 bouyer mapsize = init_tables - KERNTEXTOFF;
573 1.4 bouyer /* after the tables we'll have:
574 1.4 bouyer * - UAREA
575 1.4 bouyer * - dummy user PGD (x86_64)
576 1.4 bouyer * - HYPERVISOR_shared_info
577 1.4 bouyer * - ISA I/O mem (if needed)
578 1.4 bouyer */
579 1.4 bouyer mapsize += UPAGES * NBPG;
580 1.4 bouyer #ifdef __x86_64__
581 1.4 bouyer mapsize += NBPG;
582 1.4 bouyer #endif
583 1.4 bouyer mapsize += NBPG;
584 1.2 bouyer
585 1.2 bouyer #ifdef DOM0OPS
586 1.10 cegger if (xendomain_is_dom0()) {
587 1.2 bouyer /* space for ISA I/O mem */
588 1.4 bouyer mapsize += IOM_SIZE;
589 1.4 bouyer }
590 1.4 bouyer #endif
591 1.4 bouyer 	/* at this point mapsize doesn't include the table size */
592 1.4 bouyer
593 1.4 bouyer #ifdef __x86_64__
594 1.4 bouyer count = TABLE_L2_ENTRIES;
595 1.4 bouyer #else
596 1.4 bouyer count = (mapsize + (NBPD_L2 -1)) >> L2_SHIFT;
597 1.4 bouyer #endif /* __x86_64__ */
598 1.4 bouyer
599 1.4 bouyer /* now compute how many L2 pages we need exactly */
600 1.4 bouyer XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
601 1.4 bouyer while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
602 1.4 bouyer ((long)count << L2_SHIFT) + KERNBASE) {
603 1.4 bouyer count++;
604 1.2 bouyer }
605 1.4 bouyer #ifndef __x86_64__
606 1.5 bouyer /*
607 1.5 bouyer 	 * one more L2 page: we'll allocate several pages after kva_start
608 1.5 bouyer * in pmap_bootstrap() before pmap_growkernel(), which have not been
609 1.5 bouyer * counted here. It's not a big issue to allocate one more L2 as
610 1.5 bouyer * pmap_growkernel() will be called anyway.
611 1.5 bouyer */
612 1.5 bouyer count++;
613 1.4 bouyer nkptp[1] = count;
614 1.2 bouyer #endif
615 1.2 bouyer
616 1.4 bouyer /*
617 1.4 bouyer 	 * Install the bootstrap page tables.  We may need more L2 pages here than
618 1.4 bouyer 	 * the final tables will, as the bootstrap tables are installed after them.
619 1.4 bouyer */
620 1.4 bouyer oldcount = count;
621 1.4 bouyer
622 1.4 bouyer bootstrap_again:
623 1.4 bouyer XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
624 1.2 bouyer /*
625 1.2 bouyer * Xen space we'll reclaim may not be enough for our new page tables,
626 1.2 bouyer * move bootstrap tables if necessary
627 1.2 bouyer */
628 1.4 bouyer if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
629 1.2 bouyer bootstrap_tables = init_tables +
630 1.4 bouyer ((count + l2_4_count) * PAGE_SIZE);
631 1.4 bouyer /* make sure we have enough to map the bootstrap_tables */
632 1.4 bouyer if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
633 1.4 bouyer ((long)oldcount << L2_SHIFT) + KERNBASE) {
634 1.4 bouyer oldcount++;
635 1.4 bouyer goto bootstrap_again;
636 1.4 bouyer }
637 1.2 bouyer
638 1.2 bouyer /* Create temporary tables */
639 1.2 bouyer xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
640 1.4 bouyer xen_start_info.nr_pt_frames, oldcount, 0);
641 1.2 bouyer
642 1.2 bouyer /* Create final tables */
643 1.2 bouyer xen_bootstrap_tables(bootstrap_tables, init_tables,
644 1.4 bouyer oldcount + l2_4_count, count, 1);
645 1.2 bouyer
646 1.4 bouyer /* zero out free space after tables */
647 1.4 bouyer memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
648 1.4 bouyer (UPAGES + 1) * NBPG);
649 1.28 rmind
650 1.28 rmind /* Finally, flush TLB. */
651 1.28 rmind xpq_queue_tlb_flush();
652 1.28 rmind
653 1.4 bouyer return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
654 1.2 bouyer }
655 1.2 bouyer
656 1.2 bouyer /*
657 1.2 bouyer * Build a new table and switch to it
658 1.2 bouyer * old_count is # of old tables (including PGD, PDTPE and PDE)
659 1.2 bouyer * new_count is # of new tables (PTE only)
660 1.2 bouyer * we assume areas don't overlap
661 1.2 bouyer */
662 1.2 bouyer static void
663 1.2 bouyer xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
664 1.2 bouyer int old_count, int new_count, int final)
665 1.2 bouyer {
666 1.2 bouyer pd_entry_t *pdtpe, *pde, *pte;
667 1.2 bouyer pd_entry_t *cur_pgd, *bt_pgd;
668 1.6 bouyer paddr_t addr;
669 1.6 bouyer vaddr_t page, avail, text_end, map_end;
670 1.2 bouyer int i;
671 1.2 bouyer extern char __data_start;
672 1.2 bouyer
673 1.19 jym __PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
674 1.19 jym " %d, %d)\n",
675 1.2 bouyer old_pgd, new_pgd, old_count, new_count));
676 1.2 bouyer text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
677 1.2 bouyer /*
678 1.2 bouyer * size of R/W area after kernel text:
679 1.2 bouyer * xencons_interface (if present)
680 1.2 bouyer * xenstore_interface (if present)
681 1.6 bouyer * table pages (new_count + l2_4_count entries)
682 1.2 bouyer * extra mappings (only when final is true):
683 1.4 bouyer * UAREA
684 1.4 bouyer * dummy user PGD (x86_64 only)/gdt page (i386 only)
685 1.2 bouyer * HYPERVISOR_shared_info
686 1.2 bouyer * ISA I/O mem (if needed)
687 1.2 bouyer */
688 1.6 bouyer map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
689 1.2 bouyer if (final) {
690 1.4 bouyer map_end += (UPAGES + 1) * NBPG;
691 1.4 bouyer HYPERVISOR_shared_info = (shared_info_t *)map_end;
692 1.2 bouyer map_end += NBPG;
693 1.2 bouyer }
694 1.4 bouyer /*
695 1.4 bouyer * we always set atdevbase, as it's used by init386 to find the first
696 1.4 bouyer * available VA. map_end is updated only if we are dom0, so
697 1.4 bouyer * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
698 1.4 bouyer * this case.
699 1.4 bouyer */
700 1.4 bouyer if (final)
701 1.4 bouyer atdevbase = map_end;
702 1.2 bouyer #ifdef DOM0OPS
703 1.10 cegger if (final && xendomain_is_dom0()) {
704 1.2 bouyer /* ISA I/O mem */
705 1.2 bouyer map_end += IOM_SIZE;
706 1.2 bouyer }
707 1.2 bouyer #endif /* DOM0OPS */
708 1.2 bouyer
709 1.2 bouyer __PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
710 1.2 bouyer text_end, map_end));
711 1.19 jym __PRINTK(("console %#lx ", xen_start_info.console_mfn));
712 1.19 jym __PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));
713 1.2 bouyer
714 1.2 bouyer /*
715 1.2 bouyer * Create bootstrap page tables
716 1.2 bouyer * What we need:
717 1.2 bouyer * - a PGD (level 4)
718 1.2 bouyer * - a PDTPE (level 3)
719 1.2 bouyer 	 * - a PDE (level 2)
720 1.2 bouyer * - some PTEs (level 1)
721 1.2 bouyer */
722 1.2 bouyer
723 1.2 bouyer cur_pgd = (pd_entry_t *) old_pgd;
724 1.2 bouyer bt_pgd = (pd_entry_t *) new_pgd;
725 1.2 bouyer memset (bt_pgd, 0, PAGE_SIZE);
726 1.2 bouyer avail = new_pgd + PAGE_SIZE;
727 1.4 bouyer #if PTP_LEVELS > 3
728 1.2 bouyer /* Install level 3 */
729 1.2 bouyer pdtpe = (pd_entry_t *) avail;
730 1.2 bouyer memset (pdtpe, 0, PAGE_SIZE);
731 1.2 bouyer avail += PAGE_SIZE;
732 1.2 bouyer
733 1.6 bouyer addr = ((u_long) pdtpe) - KERNBASE;
734 1.2 bouyer bt_pgd[pl4_pi(KERNTEXTOFF)] =
735 1.4 bouyer xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
736 1.2 bouyer
737 1.19 jym __PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
738 1.19 jym " -> L4[%#x]\n",
739 1.19 jym pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
740 1.4 bouyer #else
741 1.4 bouyer pdtpe = bt_pgd;
742 1.4 bouyer #endif /* PTP_LEVELS > 3 */
743 1.2 bouyer
744 1.4 bouyer #if PTP_LEVELS > 2
745 1.2 bouyer /* Level 2 */
746 1.2 bouyer pde = (pd_entry_t *) avail;
747 1.2 bouyer memset(pde, 0, PAGE_SIZE);
748 1.2 bouyer avail += PAGE_SIZE;
749 1.2 bouyer
750 1.6 bouyer addr = ((u_long) pde) - KERNBASE;
751 1.2 bouyer pdtpe[pl3_pi(KERNTEXTOFF)] =
752 1.6 bouyer xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
753 1.19 jym __PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
754 1.19 jym " -> L3[%#x]\n",
755 1.19 jym pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
756 1.6 bouyer #elif defined(PAE)
757 1.6 bouyer 	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
758 1.6 bouyer pde = (pd_entry_t *) avail;
759 1.6 bouyer memset(pde, 0, PAGE_SIZE * 5);
760 1.6 bouyer avail += PAGE_SIZE * 5;
761 1.6 bouyer addr = ((u_long) pde) - KERNBASE;
762 1.6 bouyer /*
763 1.6 bouyer 	 * Enter the L2 pages into the L3.
764 1.6 bouyer 	 * The real L2 kernel PD will be the last one (so that
765 1.6 bouyer 	 * pde[L2_SLOT_KERN] always points to the shadow).
766 1.6 bouyer */
767 1.6 bouyer for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
768 1.6 bouyer /*
769 1.25 jym 		 * Xen doesn't want R/W mappings in L3 entries; it'll add them
770 1.6 bouyer 		 * itself.
771 1.6 bouyer */
772 1.6 bouyer pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
773 1.19 jym __PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
774 1.19 jym " -> L3[%#x]\n",
775 1.19 jym (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
776 1.6 bouyer }
777 1.6 bouyer addr += PAGE_SIZE;
778 1.6 bouyer pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
779 1.19 jym __PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
780 1.19 jym " -> L3[%#x]\n",
781 1.19 jym (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));
782 1.6 bouyer
783 1.6 bouyer #else /* PAE */
784 1.4 bouyer pde = bt_pgd;
785 1.6 bouyer #endif /* PTP_LEVELS > 2 */
786 1.2 bouyer
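	/*
	 * The level 1 loop below maps the kernel from KERNTEXTOFF to map_end:
	 * kernel text and the old/new page table pages are mapped read-only,
	 * the shared_info, console and xenstore pages are redirected to their
	 * machine frames, and everything else is mapped read/write.
	 */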
787 1.2 bouyer /* Level 1 */
788 1.2 bouyer page = KERNTEXTOFF;
789 1.2 bouyer for (i = 0; i < new_count; i ++) {
790 1.6 bouyer vaddr_t cur_page = page;
791 1.2 bouyer
792 1.2 bouyer pte = (pd_entry_t *) avail;
793 1.2 bouyer avail += PAGE_SIZE;
794 1.2 bouyer
795 1.2 bouyer memset(pte, 0, PAGE_SIZE);
796 1.2 bouyer while (pl2_pi(page) == pl2_pi (cur_page)) {
797 1.2 bouyer if (page >= map_end) {
798 1.2 bouyer /* not mapped at all */
799 1.2 bouyer pte[pl1_pi(page)] = 0;
800 1.2 bouyer page += PAGE_SIZE;
801 1.2 bouyer continue;
802 1.2 bouyer }
803 1.2 bouyer pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
804 1.2 bouyer if (page == (vaddr_t)HYPERVISOR_shared_info) {
805 1.2 bouyer pte[pl1_pi(page)] = xen_start_info.shared_info;
806 1.2 bouyer __PRINTK(("HYPERVISOR_shared_info "
807 1.19 jym "va %#lx pte %#" PRIxPADDR "\n",
808 1.19 jym HYPERVISOR_shared_info, pte[pl1_pi(page)]));
809 1.2 bouyer }
810 1.7 bouyer if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
811 1.12 cegger == xen_start_info.console.domU.mfn) {
812 1.2 bouyer xencons_interface = (void *)page;
813 1.19 jym pte[pl1_pi(page)] = xen_start_info.console_mfn;
814 1.6 bouyer pte[pl1_pi(page)] <<= PAGE_SHIFT;
815 1.2 bouyer __PRINTK(("xencons_interface "
816 1.19 jym "va %#lx pte %#" PRIxPADDR "\n",
817 1.19 jym xencons_interface, pte[pl1_pi(page)]));
818 1.2 bouyer }
819 1.7 bouyer if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
820 1.7 bouyer == xen_start_info.store_mfn) {
821 1.2 bouyer xenstore_interface = (void *)page;
822 1.6 bouyer pte[pl1_pi(page)] = xen_start_info.store_mfn;
823 1.6 bouyer pte[pl1_pi(page)] <<= PAGE_SHIFT;
824 1.2 bouyer __PRINTK(("xenstore_interface "
825 1.19 jym "va %#lx pte %#" PRIxPADDR "\n",
826 1.19 jym xenstore_interface, pte[pl1_pi(page)]));
827 1.2 bouyer }
828 1.2 bouyer #ifdef DOM0OPS
829 1.2 bouyer if (page >= (vaddr_t)atdevbase &&
830 1.2 bouyer page < (vaddr_t)atdevbase + IOM_SIZE) {
831 1.2 bouyer pte[pl1_pi(page)] =
832 1.2 bouyer IOM_BEGIN + (page - (vaddr_t)atdevbase);
833 1.2 bouyer }
834 1.2 bouyer #endif
835 1.4 bouyer pte[pl1_pi(page)] |= PG_k | PG_V;
836 1.2 bouyer if (page < text_end) {
837 1.2 bouyer /* map kernel text RO */
838 1.2 bouyer pte[pl1_pi(page)] |= 0;
839 1.2 bouyer } else if (page >= old_pgd
840 1.2 bouyer && page < old_pgd + (old_count * PAGE_SIZE)) {
841 1.2 bouyer /* map old page tables RO */
842 1.2 bouyer pte[pl1_pi(page)] |= 0;
843 1.2 bouyer } else if (page >= new_pgd &&
844 1.6 bouyer page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
845 1.2 bouyer /* map new page tables RO */
846 1.2 bouyer pte[pl1_pi(page)] |= 0;
847 1.2 bouyer } else {
848 1.2 bouyer /* map page RW */
849 1.2 bouyer pte[pl1_pi(page)] |= PG_RW;
850 1.2 bouyer }
851 1.6 bouyer
852 1.9 tron if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
853 1.9 tron || page >= new_pgd) {
854 1.19 jym __PRINTK(("va %#lx pa %#lx "
855 1.19 jym "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
856 1.2 bouyer page, page - KERNBASE,
857 1.19 jym pte[pl1_pi(page)], pl1_pi(page)));
858 1.9 tron }
859 1.2 bouyer page += PAGE_SIZE;
860 1.2 bouyer }
861 1.2 bouyer
862 1.6 bouyer addr = ((u_long) pte) - KERNBASE;
863 1.2 bouyer pde[pl2_pi(cur_page)] =
864 1.4 bouyer xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
865 1.19 jym __PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
866 1.19 jym " -> L2[%#x]\n",
867 1.19 jym pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
868 1.2 bouyer /* Mark readonly */
869 1.2 bouyer xen_bt_set_readonly((vaddr_t) pte);
870 1.2 bouyer }
871 1.2 bouyer
872 1.2 bouyer /* Install recursive page tables mapping */
873 1.6 bouyer #ifdef PAE
874 1.6 bouyer /*
875 1.6 bouyer 	 * We need a shadow page for the kernel's L2 page.
876 1.6 bouyer 	 * The real L2 kernel PD will be the last one (so that
877 1.6 bouyer 	 * pde[L2_SLOT_KERN] always points to the shadow).
878 1.6 bouyer */
879 1.6 bouyer memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
880 1.6 bouyer pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
881 1.6 bouyer pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;
882 1.6 bouyer
883 1.6 bouyer /*
884 1.6 bouyer * We don't enter a recursive entry from the L3 PD. Instead,
885 1.6 bouyer 	 * we enter the first 4 L2 pages, which include the kernel's L2
886 1.6 bouyer 	 * shadow. But we have to enter the shadow after switching
887 1.6 bouyer * %cr3, or Xen will refcount some PTE with the wrong type.
888 1.6 bouyer */
889 1.6 bouyer addr = (u_long)pde - KERNBASE;
890 1.6 bouyer for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
891 1.6 bouyer pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
892 1.19 jym __PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
893 1.19 jym " entry %#" PRIxPADDR "\n",
894 1.19 jym (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
895 1.19 jym addr, pde[PDIR_SLOT_PTE + i]));
896 1.6 bouyer }
897 1.6 bouyer #if 0
898 1.6 bouyer addr += PAGE_SIZE; /* point to shadow L2 */
899 1.6 bouyer pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
900 1.6 bouyer __PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
901 1.6 bouyer (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
902 1.6 bouyer (int64_t)pde[PDIR_SLOT_PTE + 3]));
903 1.6 bouyer #endif
904 1.14 jym /* Mark tables RO, and pin the kernel's shadow as L2 */
905 1.6 bouyer addr = (u_long)pde - KERNBASE;
906 1.6 bouyer for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
907 1.6 bouyer xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
908 1.6 bouyer if (i == 2 || i == 3)
909 1.6 bouyer continue;
910 1.6 bouyer #if 0
911 1.6 bouyer __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
912 1.24 jym xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
913 1.6 bouyer #endif
914 1.6 bouyer }
915 1.6 bouyer if (final) {
916 1.6 bouyer addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
917 1.19 jym __PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
918 1.24 jym xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
919 1.6 bouyer }
920 1.6 bouyer #if 0
921 1.6 bouyer addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
922 1.6 bouyer __PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
923 1.24 jym xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
924 1.6 bouyer #endif
925 1.6 bouyer #else /* PAE */
926 1.6 bouyer /* recursive entry in higher-level PD */
927 1.2 bouyer bt_pgd[PDIR_SLOT_PTE] =
928 1.4 bouyer xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
929 1.19 jym __PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
930 1.19 jym " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
931 1.19 jym bt_pgd[PDIR_SLOT_PTE]));
932 1.2 bouyer /* Mark tables RO */
933 1.2 bouyer xen_bt_set_readonly((vaddr_t) pde);
934 1.6 bouyer #endif
935 1.6 bouyer #if PTP_LEVELS > 2 || defined(PAE)
936 1.2 bouyer xen_bt_set_readonly((vaddr_t) pdtpe);
937 1.4 bouyer #endif
938 1.4 bouyer #if PTP_LEVELS > 3
939 1.2 bouyer xen_bt_set_readonly(new_pgd);
940 1.4 bouyer #endif
941 1.2 bouyer /* Pin the PGD */
942 1.26 jym __PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
943 1.24 jym #ifdef __x86_64__
944 1.24 jym xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
945 1.24 jym #elif PAE
946 1.6 bouyer xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
947 1.6 bouyer #else
948 1.24 jym xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
949 1.6 bouyer #endif
950 1.21 jym
951 1.4 bouyer /* Save phys. addr of PDP, for libkvm. */
952 1.6 bouyer #ifdef PAE
953 1.21 jym PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
954 1.21 jym #else
955 1.21 jym PDPpaddr = (u_long)new_pgd - KERNBASE;
956 1.21 jym #endif
957 1.21 jym
958 1.2 bouyer /* Switch to new tables */
959 1.14 jym __PRINTK(("switch to PGD\n"));
960 1.2 bouyer xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
961 1.19 jym __PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
962 1.19 jym bt_pgd[PDIR_SLOT_PTE]));
963 1.21 jym
964 1.6 bouyer #ifdef PAE
965 1.6 bouyer if (final) {
966 1.21 jym /* save the address of the L3 page */
967 1.21 jym cpu_info_primary.ci_pae_l3_pdir = pdtpe;
968 1.21 jym cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);
969 1.21 jym
970 1.6 bouyer /* now enter kernel's PTE mappings */
971 1.6 bouyer addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
972 1.6 bouyer xpq_queue_pte_update(
973 1.6 bouyer xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
974 1.6 bouyer xpmap_ptom_masked(addr) | PG_k | PG_V);
975 1.6 bouyer xpq_flush_queue();
976 1.6 bouyer }
977 1.6 bouyer #endif
978 1.6 bouyer
979 1.2 bouyer /* Now we can safely reclaim space taken by old tables */
980 1.2 bouyer
981 1.14 jym __PRINTK(("unpin old PGD\n"));
982 1.2 bouyer /* Unpin old PGD */
983 1.2 bouyer xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
984 1.2 bouyer /* Mark old tables RW */
985 1.2 bouyer page = old_pgd;
986 1.2 bouyer addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
987 1.2 bouyer addr = xpmap_mtop(addr);
988 1.6 bouyer pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
989 1.2 bouyer pte += pl1_pi(page);
990 1.19 jym __PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
991 1.19 jym pde[pl2_pi(page)], addr, (long)pte));
992 1.2 bouyer while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
993 1.6 bouyer addr = xpmap_ptom(((u_long) pte) - KERNBASE);
994 1.19 jym XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
995 1.19 jym "*pte %#" PRIxPADDR "\n",
996 1.19 jym addr, (long)pte, *pte));
997 1.6 bouyer xpq_queue_pte_update(addr, *pte | PG_RW);
998 1.2 bouyer page += PAGE_SIZE;
999 1.2 bouyer /*
1000 1.2 bouyer * Our ptes are contiguous
1001 1.2 bouyer * so it's safe to just "++" here
1002 1.2 bouyer */
1003 1.2 bouyer pte++;
1004 1.2 bouyer }
1005 1.2 bouyer xpq_flush_queue();
1006 1.2 bouyer }
1007 1.2 bouyer
1008 1.2 bouyer
1009 1.2 bouyer /*
1010 1.2 bouyer * Bootstrap helper functions
1011 1.2 bouyer */
1012 1.2 bouyer
1013 1.2 bouyer /*
1014 1.2 bouyer * Mark a page readonly
1015 1.2 bouyer * XXX: assuming vaddr = paddr + KERNBASE
1016 1.2 bouyer */
1017 1.2 bouyer
1018 1.2 bouyer static void
1019 1.2 bouyer xen_bt_set_readonly (vaddr_t page)
1020 1.2 bouyer {
1021 1.2 bouyer pt_entry_t entry;
1022 1.2 bouyer
1023 1.2 bouyer entry = xpmap_ptom_masked(page - KERNBASE);
1024 1.4 bouyer entry |= PG_k | PG_V;
1025 1.2 bouyer
1026 1.2 bouyer HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
1027 1.2 bouyer }
1028 1.4 bouyer
1029 1.4 bouyer #ifdef __x86_64__
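/*
 * Install a new user-mode page directory: under 64-bit Xen the kernel and
 * user spaces use separate base pointers, and the user one is set with
 * MMUEXT_NEW_USER_BASEPTR.
 */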
1030 1.4 bouyer void
1031 1.4 bouyer xen_set_user_pgd(paddr_t page)
1032 1.4 bouyer {
1033 1.4 bouyer struct mmuext_op op;
1034 1.4 bouyer int s = splvm();
1035 1.4 bouyer
1036 1.4 bouyer xpq_flush_queue();
1037 1.4 bouyer op.cmd = MMUEXT_NEW_USER_BASEPTR;
1038 1.34 jym op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
1039 1.4 bouyer if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
1040 1.4 bouyer panic("xen_set_user_pgd: failed to install new user page"
1041 1.19 jym " directory %#" PRIxPADDR, page);
1042 1.4 bouyer splx(s);
1043 1.4 bouyer }
1044 1.4 bouyer #endif /* __x86_64__ */
1045