/*	$NetBSD: x86_xpmap.c,v 1.12.4.8 2010/10/24 22:48:22 jym Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.12.4.8 2010/10/24 22:48:22 jym Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3, so kernel pages need the user bit. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
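
/*
 * MMU updates are queued in xpq_queue and handed to Xen in a single
 * HYPERVISOR_mmu_update call, amortizing the hypercall cost.  An
 * illustrative caller (a sketch, not code from this file; ptep_ma and
 * npte are hypothetical names) batches updates and flushes once:
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(ptep_ma, npte);
 *	...
 *	xpq_flush_queue();
 *	splx(s);
 */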

void
xpq_flush_queue(void)
{
	int i, ok;

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));
	if (xpq_idx != 0 &&
	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
		printf("xpq_flush_queue: %d entries\n", xpq_idx);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			   xpq_queue[i].ptr, xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed\n");
	}
	xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
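
/*
 * xpq_queue_machphys_update() maintains the machine-to-physical (M2P)
 * translation for a frame.  A sketch of a hypothetical caller that
 * re-assigns machine frame ma to pseudo-physical page pa (an assumed
 * scenario, not code from this file) would pair it with the matching
 * P2M update:
 *
 *	xpmap_phys_to_machine_mapping[(pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
 *	    ma >> PAGE_SHIFT;
 *	xpq_queue_machphys_update(ma, pa);
 *	xpq_flush_queue();
 */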

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
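
/*
 * Sketch (hypothetical caller; ptep and npte are assumed names): PTE
 * pages are read-only under Xen, so writes go through the update queue
 * using the PTE's machine address, here obtained via xpmap_ptetomach()
 * (assumed to come from xenpmap.h):
 *
 *	xpq_queue_pte_update(xpmap_ptetomach(ptep), npte);
 *	xpq_flush_queue();
 */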

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, unsigned int level)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_table: level %u %#"PRIx64"\n",
	    level, (int64_t)pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = level;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table: level %u %#"PRIx64"\n",
		    level, (int64_t)pa);
}
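
/*
 * The level argument is one of the MMUEXT_PIN_L*_TABLE commands; the
 * per-level wrappers used later in this file (assumed to be defined in
 * xenpmap.h) reduce to it, e.g.
 *
 *	xpq_queue_pin_l2_table(ma);
 * is equivalent to
 *	xpq_queue_pin_table(ma, MMUEXT_PIN_L2_TABLE);
 */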

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}
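
/*
 * Note: MMUEXT_TLB_FLUSH_LOCAL and MMUEXT_INVLPG_LOCAL (below) act on
 * the calling vCPU only; flushing other vCPUs is assumed to be handled
 * elsewhere in the pmap, via the _MULTI/_ALL variants of these
 * commands.
 */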

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm();
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_flush_cache");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}
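
/*
 * Illustrative use (a sketch; domid and ptep_ma are hypothetical
 * names): privileged domains use this to edit page tables on behalf
 * of another domain, e.g. when dom0 maps a guest frame:
 *
 *	if (xpq_update_foreign(ptep_ma, npte, domid) != 0)
 *		return EFAULT;
 */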

#ifdef XENDEBUG
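/* Dump the pending update queue, packing up to four entries per line. */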
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new page tables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen page tables.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3], hence 4 L2 pages + 1 shadow + the L3 page itself = 6.
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
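
/*
 * Sketch (as laid out by xen_bootstrap_tables() below) of the PAE
 * bootstrap area starting at new_pgd:
 *
 *	page 0:		the L3 page (bt_pgd)
 *	pages 1-4:	the four L2 pages; the last one is the real
 *			kernel L2
 *	page 5:		shadow of the kernel L2, entered in L3[3]
 */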

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
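	/*
	 * The new tables must map themselves as well, so keep growing
	 * count until KERNTEXTOFF + mapsize plus the (count + l2_4_count)
	 * table pages fits below KERNBASE + count L2 ranges.
	 */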
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. They may need more L2 pages
	 * than the final tables, as they are installed above the final
	 * tables in VA space.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

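	/*
	 * Two passes: Xen keeps the live tables read-only and pinned, so
	 * they cannot be rewritten in place. Build throwaway tables above
	 * the Xen-provided ones, switch to them, then build the final
	 * tables back at init_tables and switch again.
	 */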
	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);
	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new set of page tables and switch to them.
 * old_count is the number of old tables (including the PGD, PDTPE and PDE);
 * new_count is the number of new tables (PTEs only).
 * We assume the areas don't overlap.
 */


static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * Enter the L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add them
		 * itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO (PG_RW deliberately clear) */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
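	/*
	 * With the recursive slot in place, the tables become addressable
	 * through ordinary virtual addresses: pointing PDIR_SLOT_PTE of
	 * the top-level table back at itself exposes every PTE in the
	 * PTE_BASE window, the standard x86 recursive-mapping trick used
	 * by the NetBSD pmap.
	 */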
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD\n"));
#ifdef PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(__x86_64__)
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)new_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		   "*pte %#" PRIxPADDR "\n",
		   addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous,
		 * so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */