/*	$NetBSD: x86_xpmap.c,v 1.54 2016/05/29 17:06:17 bouyer Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.54 2016/05/29 17:06:17 bouyer Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
/*
 * kcpuset internally uses an array of uint32_t while Xen uses an array of
 * u_long. As we're little-endian, we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
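
/*
 * Illustrative use of this union (see xen_mcast_invlpg() below):
 * kcpuset_export_u32() fills xcpum_km[], and the overlapping xcpum_xm
 * word is what gets handed to the hypervisor:
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 */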

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];
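
/*
 * Page-table updates are batched per CPU in xpq_queue_array[] and handed
 * to the hypervisor in a single HYPERVISOR_mmu_update() hypercall, either
 * when the queue fills up (XPQUEUE_SIZE entries) or when a caller needs
 * the updates to take effect. A typical sequence (illustrative only):
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(ma, npte);	(queued, not yet applied)
 *	xpq_flush_queue();		(applied by the hypervisor)
 *	splx(s);
 */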

#ifdef i386
extern union descriptor tmpgdt[];
#endif /* i386 */
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf(" 0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				    xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}
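
/*
 * Note: "lvl" above is used directly as the mmuext command, so callers
 * pass MMUEXT_PIN_L1_TABLE .. MMUEXT_PIN_L4_TABLE, usually through the
 * xpq_queue_pin_l2_table()/xpq_queue_pin_l4_table() wrappers used later
 * in this file.
 */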

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}

	return;
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the addresses down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, kc);
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the addresses down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}

/* Copy a page */
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}
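
/*
 * Note: MMUEXT_COPY_PAGE (above) and MMUEXT_CLEAR_PAGE (below) take
 * machine frame numbers, not physical addresses, hence the xpmap_ptom()
 * translations before the shift by PAGE_SHIFT.
 */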

/* Zero a physical page */
void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}

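/*
 * Unlike the xpq_queue_*() functions above, this applies a single PTE
 * update synchronously, on behalf of domain "dom" (e.g. when dom0 maps
 * another guest's pages); the local queue is flushed first so pending
 * updates are not reordered after this one.
 */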
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new page tables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen page tables.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page for
 * L3[3] (1 L3 page + 4 L2 pages + 1 shadow L2 page = 6 pages).
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
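	/*
	 * That is, grow "count" until the VA range covered by "count" L2
	 * entries (count << L2_SHIFT, starting at KERNBASE) also covers
	 * the (count + l2_4_count) page-table pages themselves, which are
	 * mapped after everything accounted for in mapsize.
	 */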
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. We may need more L2 pages than
	 * the final table will, as the bootstrap tables are installed after
	 * the final tables.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough space to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it
 * old_count is # of old tables (including PGD, PDTPE and PDE)
 * new_count is # of new tables (PTE only)
 * we assume areas don't overlap
 */
static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;
	extern char *early_zerop; /* from pmap.c */

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  early_zerop
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
		early_zerop = (char *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables
	 * What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *) new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* per-cpu L4 PD */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 PD */
	bt_pgd = (pd_entry_t *) avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries;
		 * it will add them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map the bootstrap gdt R/O. Later, we will
				 * re-add this page to uvm after making it
				 * writable.
				 */

				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif /* i386 */
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t) cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
	bt_cpu_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* save the address of the real per-cpu L4 pgd page */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t) bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		    "*pte %#" PRIxPADDR "\n",
		    addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our ptes are contiguous
		 * so it's safe to just "++" here
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */