x86_xpmap.c revision 1.86
/*	$NetBSD: x86_xpmap.c,v 1.86 2020/05/02 16:44:36 bouyer Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.86 2020/05/02 16:44:36 bouyer Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#ifdef XENDEBUG
#define	__PRINTK(x) printk x
#else
#define	__PRINTK(x)
#endif

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
unsigned long *xpmap_phys_to_machine_mapping __read_mostly;
kmutex_t pte_lock __cacheline_aligned;
vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx __read_mostly;

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
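
/*
 * MMU updates are queued per-CPU in xpq_queue_array and handed to the
 * hypervisor in batches.  Illustrative sketch of the calling pattern
 * (not code from this file; 'ptep' is assumed to point into a
 * directly-mapped page table):
 *
 *	s = splvm();
 *	xpq_queue_pte_update(xpmap_ptom(((vaddr_t)ptep) - KERNBASE), npte);
 *	xpq_flush_queue();
 *	splx(s);
 *
 * xpq_increment_idx() flushes automatically once a per-CPU queue fills.
 */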

void xen_failsafe_handler(void);

extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
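
/*
 * Illustrative sketch of the union above in action (the real callers
 * are xen_mcast_invlpg() and xen_mcast_tlbflush() below):
 *
 *	xcpumask_t xcpumask;
 *
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);
 */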

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		pmap_pte_clearbits(ptp, PTE_W);
	}
	s = splvm(); /* XXXSMP */
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

void
xpq_flush_queue(void)
{
	mmu_update_t *xpq_queue;
	int done = 0, ret;
	size_t xpq_idx;

	xpq_idx = curcpu()->ci_xpq_idx;
	xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];

retry:
	ret = HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &done, DOMID_SELF);

	if (ret < 0 && xpq_idx != 0) {
		printf("xpq_flush_queue: %zu entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, done, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (done != 0) {
			xpq_queue += done;
			xpq_idx -= done;
			done = 0;
			goto retry;
		}

		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	curcpu()->ci_xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++curcpu()->ci_xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = lvl;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_flush_cache(void)
{
	int s = splvm(); /* XXXSMP */

	xpq_flush_queue();

	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}
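
/*
 * Illustrative sketch of a hypothetical caller of the above (not code
 * from this file): pass the machine address of the PTE slot, the new
 * PTE value, and the id of the foreign domain owning the page:
 *
 *	if ((error = xpq_update_foreign(ma, npte, dom)) != 0)
 *		return error;
 */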

#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef __x86_64__
#define PDIRSZ	PTP_LEVELS
#else
/*
 * For PAE, we need an L3 page, a single contiguous L2 "superpage" of 4 pages
 * (all of them mapped by the L3 page), and a shadow page for L3[3].
 */
#define PDIRSZ	(1 + 4 + 1)
#endif
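
/*
 * Worked size example, derived from xen_bootstrap_tables() below: the
 * new table area spans (new_count + PDIRSZ) pages, i.e. new_count L1
 * (PTE) pages plus the directory pages. On amd64, PDIRSZ = PTP_LEVELS
 * = 4 (per-cpu L4, kernel L4, L3, L2); on i386 PAE, PDIRSZ = 6
 * (1 L3 + 4 L2 + 1 shadow).
 */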

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t nL2, oldcount, mapsize;
	vaddr_t our_tables, xen_tables;
	u_int descs[4];

	xen_init_features();

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PTE_NX : 0;

	/* Space after the Xen bootstrap tables should be free */
	xen_tables = xen_start_info.pt_base;
	our_tables = xen_tables + (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = xen_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	nL2 = TABLE_L2_ENTRIES;
#else
	nL2 = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
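	/*
	 * Each nL2 increment extends the VA range covered from KERNBASE by
	 * NBPD_L2 bytes, while the area that must be mapped grows by only
	 * one PAGE_SIZE table page, so this loop converges quickly.
	 */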
	while (KERNTEXTOFF + mapsize + (nL2 + PDIRSZ) * PAGE_SIZE >
	    KERNBASE + (nL2 << L2_SHIFT)) {
		nL2++;
	}

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	nL2++;
	nkptp[1] = nL2;
#endif

	/*
	 * Install the bootstrap tables. We may need more L2 pages here than
	 * the final tables will, since the bootstrap tables are installed
	 * after (above) the final tables in memory.
	 */
	oldcount = nL2;

bootstrap_again:

	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables further up if necessary.
	 */
	if (our_tables < xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE))
		our_tables = xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (our_tables + ((oldcount + PDIRSZ) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_tables, our_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(our_tables, xen_tables,
	    oldcount + PDIRSZ, nL2, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE));
}

/*
 * Build a new set of page tables and switch to them.
 * old_count is the number of old table pages (including L4, L3 and L2).
 * new_count is the number of new table pages (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *L4cpu, *L4, *L3, *L2, *pte;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
#ifdef i386
	extern union descriptor tmpgdt[];
#endif

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + PDIRSZ entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + PDIRSZ) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	avail = new_pgd;

	/*
	 * Create our page tables.
	 */

#ifdef __x86_64__
	/* per-cpu L4 */
	L4cpu = (pd_entry_t *)avail;
	memset(L4cpu, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* pmap_kernel L4 */
	L4 = (pd_entry_t *)avail;
	memset(L4, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* L3 */
	L3 = (pd_entry_t *)avail;
	memset(L3, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* link L4->L3 */
	addr = ((u_long)L3) - KERNBASE;
	L4cpu[pl4_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;
	L4[pl4_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;

	/* L2 */
	L2 = (pd_entry_t *)avail;
	memset(L2, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* link L3->L2 */
	addr = ((u_long)L2) - KERNBASE;
	L3[pl3_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;
#else
	/* no L4 on i386 PAE */
	__USE(L4cpu);
	__USE(L4);

	/* L3 */
	L3 = (pd_entry_t *)avail;
	memset(L3, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/*
	 * Our PAE-style level 2, 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we enter L2 KERN, and not L2 KERN SHADOW, into L3[3].
	 * This way, L2[L2_SLOT_KERN] always points to the shadow.
	 */
	L2 = (pd_entry_t *)avail;
	memset(L2, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries, it'll add them itself.
	 */
	addr = ((u_long)L2) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		L3[i] = xpmap_ptom_masked(addr) | PTE_P;
	}
	addr += PAGE_SIZE;
	L3[3] = xpmap_ptom_masked(addr) | PTE_P;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		memset(pte, 0, PAGE_SIZE);
		avail += PAGE_SIZE;

		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PTE_P;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. Nothing to do. */
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + PDIRSZ) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		L2[pl2_pi(cur_page)] = xpmap_ptom_masked(addr) | PTE_W | PTE_P;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef __x86_64__
	/* Recursive entry in pmap_kernel(). */
	L4[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)L4 - KERNBASE)
	    | PTE_P | xpmap_pg_nx;
	/* Recursive entry in higher-level per-cpu PD. */
	L4cpu[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)L4cpu - KERNBASE)
	    | PTE_P | xpmap_pg_nx;

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)L2);
#else
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&L2[L2_SLOT_KERN + NPDPG], &L2[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &L2[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)L2 - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		L2[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PTE_P |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)L2 - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)L2) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)L2 - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#endif

	xen_bt_set_readonly((vaddr_t)L3);
#ifdef __x86_64__
	xen_bt_set_readonly((vaddr_t)L4cpu);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef __x86_64__
	PDPpaddr = (u_long)L4 - KERNBASE;
#else
	PDPpaddr = (u_long)L2 - KERNBASE; /* PDP is the L2 with PAE */
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

	if (final) {
#ifdef __x86_64__
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = L4cpu;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)L4cpu - KERNBASE);
#else
		/* Save the address of the L3 page. */
		cpu_info_primary.ci_pae_l3_pdir = L3;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)L2 - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&L2[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PTE_P);
		xpq_flush_queue();
#endif
	}

	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)L2[pl2_pi(page)] & PTE_4KFRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PTE_W);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PTE_P | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm(); /* XXXSMP */

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */