/* $NetBSD: x86_xpmap.c,v 1.69.2.1 2017/04/21 16:53:39 bouyer Exp $ */

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.69.2.1 2017/04/21 16:53:39 bouyer Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#else
#define	XENPRINTF(x)
#endif

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
unsigned long *xpmap_phys_to_machine_mapping __read_mostly;
kmutex_t pte_lock __cacheline_aligned;
vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx __read_mostly;

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

void xen_failsafe_handler(void);

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

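/*
 * Make an LDT usable under Xen: remove write permission from the pages
 * backing it (Xen refuses writable descriptor tables), then queue the
 * MMUEXT_SET_LDT operation.
 */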
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

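/*
 * Flush the per-CPU queue of pending MMU updates with a single
 * HYPERVISOR_mmu_update() call. On partial failure, retry with the
 * entries that were not processed before panicking.
 */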
void
xpq_flush_queue(void)
{
	mmu_update_t *xpq_queue;
	int done = 0, ret, xpq_idx;

	xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];
	xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];

retry:
	ret = HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &done, DOMID_SELF);

	if (ret < 0 && xpq_idx != 0) {
		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, done, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (done != 0) {
			xpq_queue += done;
			xpq_idx -= done;
			done = 0;
			goto retry;
		}

		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

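/* Bump the per-CPU queue index; flush the queue when it becomes full. */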
static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

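/* Queue an update of the machine-to-physical (M2P) entry for machine address 'ma'. */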
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
}

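/* Queue the write of 'val' into the PTE located at machine address 'ptr'. */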
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	xpq_queue[xpq_idx].ptr = ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
}

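/* Load a new page-table base (%cr3), given as a machine address. */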
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

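/*
 * Pin the page-table page at machine address 'pa'. 'lvl' is the
 * MMUEXT_PIN_L*_TABLE command matching the page's level.
 */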
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = lvl;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

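/* Tell Xen to use the page-aligned LDT at 'va', with 'entries' descriptors. */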
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

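/* Flush the TLB of the local vCPU. */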
void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}

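/* Invalidate the TLB entry for 'va' on the local vCPU. */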
void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

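/* Invalidate the TLB entry for 'va' on the vCPUs listed in 'kc'. */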
void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

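/* Let the hypervisor copy the page at 'srcpa' onto the page at 'dstpa'. */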
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

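/* Let the hypervisor zero out the page at physical address 'pa'. */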
void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

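/*
 * Perform a single, synchronous PTE update on behalf of domain 'dom'.
 * Returns 0 on success, EFAULT if the hypervisor rejects the update.
 */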
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;
	u_int descs[4];

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

	/* Space after the Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages here than the
	 * final tables will, since the bootstrap tables are installed after
	 * the final tables.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * Xen space we'll reclaim may not be enough for our new page tables,
	 * move bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
#ifdef i386
	extern union descriptor tmpgdt[];
#endif

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;

#if PTP_LEVELS > 3
	/* Per-cpu L4 */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install L3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
#else
	pdtpe = bt_pgd;
#endif

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
#elif defined(PAE)
	/*
	 * Our PAE-style level 2, 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we enter pdtpe[3] into L2 KERN, and not L2 KERN SHADOW.
	 * This way, pde[L2_SLOT_KERN] always points to the shadow.
	 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries, it'll add them itself.
	 */
	addr = ((u_long)pde) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_V;
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_V;
#else
	pde = bt_pgd;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i ++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PG_V;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. */
				pte[pl1_pi(page)] |= PG_RO;
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_RW | PG_V;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_V |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#else /* PAE */

	/* Recursive entry in pmap_kernel(). */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#ifdef __x86_64__
	/* Recursive entry in higher-level per-cpu PD. */
	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#endif

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif /* PAE */

#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
	if (final) {
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_V | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
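/* Install the page directory at physical address 'page' as the user %cr3. */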
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */