/*	$NetBSD: x86_xpmap.c,v 1.16 2009/10/19 18:41:11 bouyer Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.16 2009/10/19 18:41:11 bouyer Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x
/* On x86_64 the kernel runs in ring 3, so kernel mappings need PG_u set. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!");
}

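/*
 * Load a new LDT. Xen only accepts descriptor-table pages that are
 * mapped read-only, so clear PG_RW on every page backing the LDT
 * before queueing the MMUEXT_SET_LDT operation.
 */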
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
			      entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	xpq_flush_queue();
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

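/*
 * MMU update queue: page-table writes are batched here and submitted
 * to the hypervisor in a single HYPERVISOR_mmu_update() call, which
 * amortizes the hypercall cost. The queue is flushed when it fills up
 * and before any operation that needs the updates to have taken effect.
 */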
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

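/* Submit all pending MMU updates in one hypercall; panic on failure. */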
void
xpq_flush_queue(void)
{
	int i, ok;

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%" PRIx64 " 0x%08" PRIx64 "\n", i,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val));
	if (xpq_idx != 0 &&
	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
		printf("xpq_flush_queue: %d entries\n", xpq_idx);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			   (uint64_t)xpq_queue[i].ptr,
			   (uint64_t)xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed");
	}
	xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

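/*
 * Queue an update of the machine-to-physical translation table:
 * record that machine address "ma" now belongs to physical page "pa".
 */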
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

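/*
 * Queue a page-table entry update: "ptr" is the machine address of the
 * PTE to write and "val" its new contents; Xen validates the update
 * when the queue is flushed.
 */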
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

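/*
 * Switch to a new page-table base (the PV equivalent of loading %cr3).
 */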
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

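/*
 * Pin a top-level page table: Xen validates the whole tree and
 * type-locks its pages, which is required before the table may be
 * installed as a page-table base.
 */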
void
xpq_queue_pin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

#if defined(__x86_64__)
	op.cmd = MMUEXT_PIN_L4_TABLE;
#else
	op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

#ifdef PAE
static void
xpq_queue_pin_l3_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l3_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

	op.cmd = MMUEXT_PIN_L3_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_l3_table");
}
#endif

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

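/* Point Xen at the (read-only) pages backing the LDT. */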
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm();
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_flush_cache");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

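/*
 * Perform a single, unbatched PTE update on behalf of another domain
 * "dom" (used e.g. when mapping foreign pages); any pending local
 * updates are flushed first.
 */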
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i, j;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		/* Print up to four queue entries per line. */
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		for (j = 0; j < 3; j++) {
			if (++i >= xpq_idx)
				break;
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    " %" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		}
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/*
 * How many PDEs? Twice the kernel image entries if the kernel is also
 * mapped at KERNBASE (L2_SLOT_KERNBASE > 0).
 */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of the Xen bootstrap tables.
 */

/* How many PDPs do we need? */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/*
	 * Now compute exactly how many L2 pages we need: each one maps
	 * NBPD_L2 bytes of VA, and the mapped range must cover the new
	 * tables themselves as well, so grow count until everything fits.
	 */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. They may need more L2 pages
	 * than the final table will have, as they are installed after
	 * (above) the final table.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);
	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new set of page tables and switch to them.
 * old_count is # of old tables (including PGD, PDTPE and PDE),
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */


static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console 0x%lx ", xen_start_info.console.domU.mfn));
	__PRINTK(("xenstore 0x%lx\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables
	 * What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *)old_pgd;
	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L4[0x%x]\n",
	    pdtpe, (uint64_t)addr, (uint64_t)bt_pgd[pl4_pi(KERNTEXTOFF)],
	    pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L3[0x%x]\n",
	    pde, (int64_t)addr, (int64_t)pdtpe[pl3_pi(KERNTEXTOFF)],
	    pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long)pde) - KERNBASE;
	/*
	 * Enter the L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries; it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * i,
		    (int64_t)addr, (int64_t)pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
	    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * 4,
	    (int64_t)addr, (int64_t)pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    HYPERVISOR_shared_info, (int64_t)pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console.domU.mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xencons_interface, (int64_t)pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xenstore_interface, (int64_t)pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO (no PG_RW) */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va 0x%lx pa 0x%lx "
				    "entry 0x%" PRIx64 " -> L1[0x%x]\n",
				    page, page - KERNBASE,
				    (int64_t)pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L2[0x%x]\n", pte, (int64_t)addr,
		    (int64_t)pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i, (long)addr,
		    (int64_t)pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
	    " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD\n"));
#ifdef PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
#ifdef __i386__
	/* Save phys. addr of PDP, for libkvm. */
	PDPpaddr = (long)pde;
#ifdef PAE
	/* also save the address of the L3 page */
	pmap_l3pd = pdtpe;
	pmap_l3paddr = (new_pgd - KERNBASE);
#endif /* PAE */
#endif /* i386 */
	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%" PRIx64 "\n",
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
#ifdef PAE
	if (final) {
		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t)pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde 0x%" PRIx64 " addr 0x%" PRIx64 " pte 0x%lx\n",
	    (int64_t)pde[pl2_pi(page)], (int64_t)addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		XENPRINTK(("addr 0x%" PRIx64 " pte 0x%lx *pte 0x%" PRIx64 "\n",
		   (int64_t)addr, (long)pte, (int64_t)*pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
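/*
 * On x86_64 Xen keeps separate kernel and user page-table bases;
 * install "page" as the user one.
 */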
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %lx", page);
	splx(s);
}
#endif /* __x86_64__ */