/*	$NetBSD: x86_xpmap.c,v 1.74 2017/09/16 09:28:38 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.74 2017/09/16 09:28:38 maxv Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#else
#define	XENPRINTF(x)
#endif

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
unsigned long *xpmap_phys_to_machine_mapping __read_mostly;
kmutex_t pte_lock __cacheline_aligned;
vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx __read_mostly;

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];

void xen_failsafe_handler(void);

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t, while Xen uses an array of
 * u_long. As we're little-endian, we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
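
/*
 * A minimal usage sketch (mirroring xen_mcast_tlbflush() below): export the
 * kcpuset into the uint32_t view, then hand the u_long view to Xen:
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 */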

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

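/*
 * Xen requires the pages backing a descriptor table to be mapped read-only
 * before it will use them, so clear PG_RW on each LDT page before queueing
 * the MMUEXT_SET_LDT operation.
 */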
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

void
xpq_flush_queue(void)
{
	mmu_update_t *xpq_queue;
	int done = 0, ret;
	size_t xpq_idx;

	xpq_idx = curcpu()->ci_xpq_idx;
	xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];

retry:
	ret = HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &done, DOMID_SELF);

	if (ret < 0 && xpq_idx != 0) {
		printf("xpq_flush_queue: %zu entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, done, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (done != 0) {
			xpq_queue += done;
			xpq_idx -= done;
			done = 0;
			goto retry;
		}

		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	curcpu()->ci_xpq_idx = 0;
}

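/*
 * The xpq_queue_* functions below accumulate MMU updates in the per-CPU
 * queue; nothing takes effect until xpq_flush_queue() hands the batch to
 * the hypervisor. A minimal caller-side sketch (hypothetical, pte_ma being
 * the machine address of the PTE to update and npte the new entry):
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(pte_ma, npte);
 *	xpq_flush_queue();
 *	splx(s);
 */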
static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++curcpu()->ci_xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

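/*
 * The lvl argument below is one of Xen's MMUEXT_PIN_L*_TABLE commands. The
 * xpq_queue_pin_l2_table()/_l3_/_l4_ calls used later in this file are
 * assumed to be thin wrappers passing the matching command here.
 */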
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = lvl;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	asm("wbinvd" ::: "memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

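/*
 * MMUEXT_COPY_PAGE and MMUEXT_CLEAR_PAGE operate on machine frames, so the
 * hypervisor copies or zeroes the page on our behalf; the guest does not
 * need a virtual mapping of it.
 */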
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

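/*
 * Unlike the xpq_queue_* functions above, xpq_update_foreign() issues a
 * single synchronous mmu_update on behalf of domain 'dom' (after flushing
 * any pending local updates), and reports failure instead of panicking.
 */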
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

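/*
 * l2_4_count is the number of pages the bootstrap tables need above L1:
 * on amd64 the per-CPU L4, the pmap_kernel() shadow L4, the L3 and the L2;
 * on PAE the L3 plus the L2 "superpage" described below; on plain i386 the
 * single L2.
 */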
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new
 * page tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;
	u_int descs[4];

	xen_init_features();

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

	/* Space after the Xen bootstrap tables should be free. */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}
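
	/*
	 * The loop above is a fixpoint computation: each extra L2 page maps
	 * NBPD_L2 more VA, but must itself be mapped too, which can push the
	 * end of the mapped area past the next L2 boundary. Iterating until
	 * the inequality settles is simpler than solving it in closed form.
	 */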

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. We may need more L2 pages to
	 * map them than the final tables will need, since they are installed
	 * above the final tables.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables further up if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
#ifdef i386
	extern union descriptor tmpgdt[];
#endif

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;

#if PTP_LEVELS > 3
	/* Per-cpu L4 */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install L3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
#else
	pdtpe = bt_pgd;
#endif

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
#elif defined(PAE)
	/*
	 * Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we make pdtpe[3] point to L2 KERN, and not L2 KERN SHADOW.
	 * This way, pde[L2_SLOT_KERN] always points to the shadow.
	 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries, it'll add them itself.
	 */
	addr = ((u_long)pde) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_V;
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_V;
#else
	pde = bt_pgd;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PG_V;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. */
				pte[pl1_pi(page)] |= PG_RO;
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_RW | PG_V;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which include the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_V |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#else /* PAE */

	/* Recursive entry in pmap_kernel(). */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#ifdef __x86_64__
	/* Recursive entry in higher-level per-cpu PD. */
	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#endif

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif /* PAE */

#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
	if (final) {
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_V | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}
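
/*
 * Note: HYPERVISOR_update_va_mapping() writes the PTE immediately, without
 * going through the xpq queue, so the page above is read-only as soon as
 * the call returns.
 */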

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */