/*	$NetBSD: x86_xpmap.c,v 1.26.2.1 2011/06/03 13:27:41 cherry Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.26.2.1 2011/06/03 13:27:41 cherry Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3, so kernel pages need PG_u set */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


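/*
 * Install a new LDT. Xen requires the pages backing an LDT to be
 * mapped read-only (presumably so it can validate the descriptors),
 * hence the PG_RW clearing below before the set_ldt op is queued.
 */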
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_lock();
	xpq_queue_set_ldt(base, entries);
	xpq_queue_unlock();
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
static struct simplelock xpq_lock = SIMPLELOCK_INITIALIZER;

void
xpq_queue_lock(void)
{
	simple_lock(&xpq_lock);
}

void
xpq_queue_unlock(void)
{
	simple_unlock(&xpq_lock);
}

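/*
 * MMU updates are batched in xpq_queue[] and handed to the hypervisor
 * in a single mmu_update hypercall by xpq_flush_queue(). A sketch of
 * the expected calling pattern (illustrative, not a fixed contract;
 * cf. the sequences used in xen_set_ldt() and xen_bootstrap_tables()):
 *
 *	s = splvm();
 *	xpq_queue_lock();
 *	xpq_queue_pte_update(ma, npte);		queue one or more updates
 *	xpq_flush_queue();			push the batch to Xen
 *	xpq_queue_unlock();
 *	splx(s);
 */
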
/* Must be called with xpq_lock held */
void
xpq_flush_queue(void)
{
	int i, ok, ret;

	KASSERT(simple_lock_held(&xpq_lock));
	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		printf("xpq_flush_queue: %d entries (%d successful)\n",
		    xpq_idx, ok);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			   xpq_queue[i].ptr, xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx = 0;
}

/* Must be called with xpq_lock held */
static inline void
xpq_increment_idx(void)
{

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

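/*
 * Queue an update of the machine->physical (M2P) translation table:
 * make machine page "ma" translate back to physical page "pa".
 */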
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

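/*
 * Queue a normal page table update: write "val" into the PTE at
 * machine address "ptr".
 */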
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

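/*
 * Switch to a new top-level page table; "pa" is the machine address
 * of the new table.
 */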
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

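/*
 * Pin the page at machine address "pa" as a page table page. Note
 * that "lvl" is the MMUEXT_PIN_L*_TABLE command for the level being
 * pinned, not a raw level number (see the xpq_queue_pin_l*_table()
 * callers elsewhere in this file).
 */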
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_queue_lock();
	xpq_flush_queue();

	XENPRINTK2(("xpq_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		printf("errno == %d\n", err);
		panic("xpq_flush_cache");
	}
	xpq_queue_unlock();
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

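/*
 * Multicast/broadcast invalidation helpers. "cpumask" is a mask of
 * Xen vCPU numbers; the hypercalls below are presumed synchronous,
 * like the tlbflush variants further down that are explicitly
 * documented as such.
 */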
static void
xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
{
	mmuext_op_t op;

	KASSERT(simple_lock_held(&xpq_lock));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}

	return;
}

static void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	/* Round both ends down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, cpumask);
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	/* Round both ends down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}

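/*
 * Synchronously update a PTE that maps a page belonging to another
 * domain "dom"; returns 0 on success, EFAULT if the hypercall fails.
 */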
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables. Returns the first vaddr
 * available for use once the Xen bootstrap tables have been
 * reclaimed.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we use a single contiguous L2 "superpage" of 4 pages, all
 * of them mapped by the L3 page, plus a shadow page for L3[3].
 * Together with the L3 page itself that makes 6 pages.
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/*
	 * Now compute how many L2 pages we need exactly: grow count
	 * until everything to be mapped (the kernel, mapsize bytes,
	 * plus the table pages themselves) fits below what count L2
	 * entries can map from KERNBASE.
	 */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages here than
	 * the final table will have, as the bootstrap tables are
	 * installed above the final tables.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);
	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new table and switch to it.
 * old_count is the # of old tables (including PGD, PDTPE and PDE);
 * new_count is the # of new tables (PTE only).
 * We assume the areas don't overlap.
 */


static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	xpq_queue_lock();

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */
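
	/*
	 * Sketch of the layout built below, starting at new_pgd (which
	 * levels actually exist depends on PTP_LEVELS and PAE; for PAE
	 * the first page is the L3 itself and the "L2" is 5 pages:
	 * 4 L2 + 1 shadow):
	 *
	 *	new_pgd:	top-level table, one page
	 *	next page(s):	intermediate levels (L3, L2)
	 *	then:		new_count L1 (PTE) pages
	 */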

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * Enter the L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i ++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi (cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)new_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		   "*pte %#" PRIxPADDR "\n",
		   addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our ptes are contiguous
		 * so it's safe to just "++" here
		 */
		pte++;
	}
	xpq_flush_queue();
	xpq_queue_unlock();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
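/*
 * Install a new user-mode top-level page table (under Xen the amd64
 * kernel keeps separate user and kernel PGDs). "page" is a physical
 * address; it is translated to a machine frame below.
 */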
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	KASSERT(simple_lock_held(&xpq_lock));
	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */