/*	$NetBSD: x86_xpmap.c,v 1.53.4.4 2017/02/05 13:40:23 skrll Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.53.4.4 2017/02/05 13:40:23 skrll Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK2(x) /* printk x */
static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK2(x)
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;
vaddr_t xen_dummy_page;

pt_entry_t xpmap_pg_nx __read_mostly;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
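
/*
 * Illustrative usage (a sketch of the pattern used by the xen_mcast_*
 * functions below): export the kcpuset into the uint32_t view, then hand
 * the u_long view to the hypervisor.
 *
 *	xcpumask_t xcpumask;
 *
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 */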

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

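/*
 * Map the pages backing the LDT read-only (Xen requires descriptor-table
 * pages to not be writable), then register the LDT with the hypervisor.
 */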
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif

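/*
 * MMU update operations are batched per-CPU in xpq_queue_array[] and
 * submitted to the hypervisor in a single HYPERVISOR_mmu_update call by
 * xpq_flush_queue(). On failure, the queues of all CPUs are dumped and
 * we panic.
 */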
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				   xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

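/* Queue an update of the machine-to-physical translation table. */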
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
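
/*
 * Illustrative usage of the update queue (a sketch, not a verbatim
 * caller): queued updates only reach the hypervisor at the next flush,
 * so a typical sequence batches several updates and flushes once.
 *
 *	xpq_queue_pte_update(xpmap_ptom(pte_pa), npte);
 *	...
 *	xpq_flush_queue();
 *
 * Here pte_pa and npte are hypothetical names for the PTE's physical
 * address and the new PTE value.
 */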

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

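/*
 * lvl is one of the MMUEXT_PIN_L*_TABLE commands; callers typically go
 * through the xpq_queue_pin_l{2,3,4}_table() wrappers.
 */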
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	XENPRINTK2(("xpq_flush_cache\n"));
	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

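/*
 * Cross-vCPU TLB shootdowns: the xen_mcast_* variants operate on the
 * vCPUs named in a kcpuset, the xen_bcast_* variants on all vCPUs.
 * All of them flush the pending MMU update queue first.
 */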
void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}

	return;
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, kc);
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}

/* Copy a page */
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}

/* Zero a physical page */
void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic(__func__);
	}
}

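/*
 * Synchronously update a single PTE, with the update credited to domain
 * 'dom' (which may be DOMID_SELF, or a foreign domain when mapping
 * another domain's pages).
 */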
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
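
/*
 * Note: l2_4_count is the number of upper-level table pages consumed by
 * xen_bootstrap_tables() on top of the new_count L1 pages: with PAE, 6
 * (1 L3 + 4 L2 + 1 L2 shadow); on amd64, PTP_LEVELS (per-cpu L4, shadow
 * L4, L3, L2); on plain i386, just the single L2/PGD page.
 */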

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;
	u_int descs[4];

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

	/* Space after Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap tables. They may need more L2 pages than the
	 * final tables will, as they are installed after the final tables.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * The space we'll reclaim from Xen may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new set of page tables and switch to them.
 * old_count is the number of old tables (including PGD, PDTPE and PDE).
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;

#if PTP_LEVELS > 3
	/* Per-cpu L4 */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install L3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#else
	pdtpe = bt_pgd;
#endif

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#elif defined(PAE)
	/*
	 * Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we enter pdtpe[3] into L2 KERN, and not L2 KERN SHADOW.
	 * This way, pde[L2_SLOT_KERN] always points to the shadow.
	 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries; it'll add them itself.
	 */
	addr = ((u_long)pde) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
#else
	pde = bt_pgd;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. */
				pte[pl1_pi(page)] |= PG_RO;
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#else /* PAE */

	/* Recursive entry in pmap_kernel(). */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
	    | PG_k | PG_RO | PG_V | xpmap_pg_nx;
#ifdef __x86_64__
	/* Recursive entry in higher-level per-cpu PD. */
	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
	    | PG_k | PG_RO | PG_V | xpmap_pg_nx;
#endif

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif /* PAE */

#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
	if (final) {
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/* Now we can safely reclaim the space taken by the old tables. */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/* Our PTEs are contiguous so it's safe to just "++" here. */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
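/*
 * Install a new user page directory (MMUEXT_NEW_USER_BASEPTR), the base
 * pointer Xen uses when returning to 64-bit user mode.
 */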
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */