x86_xpmap.c revision 1.67
      1  1.64      maxv /*	$NetBSD: x86_xpmap.c,v 1.67 2016/11/15 17:01:12 maxv Exp $	*/
      2   1.2    bouyer 
      3   1.2    bouyer /*
      4   1.2    bouyer  * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
      5   1.2    bouyer  *
      6   1.2    bouyer  * Permission to use, copy, modify, and distribute this software for any
      7   1.2    bouyer  * purpose with or without fee is hereby granted, provided that the above
      8   1.2    bouyer  * copyright notice and this permission notice appear in all copies.
      9   1.2    bouyer  *
     10   1.2    bouyer  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     11   1.2    bouyer  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     12   1.2    bouyer  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     13   1.2    bouyer  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     14   1.2    bouyer  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     15   1.2    bouyer  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     16   1.2    bouyer  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     17   1.2    bouyer  */
     18   1.2    bouyer 
     19   1.2    bouyer /*
     20   1.2    bouyer  * Copyright (c) 2006, 2007 Manuel Bouyer.
     21   1.2    bouyer  *
     22   1.2    bouyer  * Redistribution and use in source and binary forms, with or without
     23   1.2    bouyer  * modification, are permitted provided that the following conditions
     24   1.2    bouyer  * are met:
     25   1.2    bouyer  * 1. Redistributions of source code must retain the above copyright
     26   1.2    bouyer  *    notice, this list of conditions and the following disclaimer.
     27   1.2    bouyer  * 2. Redistributions in binary form must reproduce the above copyright
     28   1.2    bouyer  *    notice, this list of conditions and the following disclaimer in the
     29   1.2    bouyer  *    documentation and/or other materials provided with the distribution.
     30   1.2    bouyer  *
     31   1.2    bouyer  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     32   1.2    bouyer  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     33   1.2    bouyer  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     34   1.2    bouyer  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     35   1.2    bouyer  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     36   1.2    bouyer  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     37   1.2    bouyer  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     38   1.2    bouyer  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     39   1.2    bouyer  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     40   1.2    bouyer  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     41   1.2    bouyer  */
     42   1.2    bouyer 
     43   1.2    bouyer /*
     44   1.2    bouyer  * Copyright (c) 2004 Christian Limpach.
     45   1.2    bouyer  * All rights reserved.
     46   1.2    bouyer  *
     47   1.2    bouyer  * Redistribution and use in source and binary forms, with or without
     48   1.2    bouyer  * modification, are permitted provided that the following conditions
     49   1.2    bouyer  * are met:
     50   1.2    bouyer  * 1. Redistributions of source code must retain the above copyright
     51   1.2    bouyer  *    notice, this list of conditions and the following disclaimer.
     52   1.2    bouyer  * 2. Redistributions in binary form must reproduce the above copyright
     53   1.2    bouyer  *    notice, this list of conditions and the following disclaimer in the
     54   1.2    bouyer  *    documentation and/or other materials provided with the distribution.
     55   1.2    bouyer  *
     56   1.2    bouyer  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     57   1.2    bouyer  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     58   1.2    bouyer  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     59   1.2    bouyer  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     60   1.2    bouyer  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     61   1.2    bouyer  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     62   1.2    bouyer  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     63   1.2    bouyer  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     64   1.2    bouyer  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     65   1.2    bouyer  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     66   1.2    bouyer  */
     67   1.2    bouyer 
     68   1.2    bouyer #include <sys/cdefs.h>
     69  1.64      maxv __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.67 2016/11/15 17:01:12 maxv Exp $");
     70   1.2    bouyer 
     71   1.2    bouyer #include "opt_xen.h"
     72   1.4    bouyer #include "opt_ddb.h"
     73   1.4    bouyer #include "ksyms.h"
     74   1.2    bouyer 
     75   1.2    bouyer #include <sys/param.h>
     76   1.2    bouyer #include <sys/systm.h>
     77  1.38    cherry #include <sys/mutex.h>
     78  1.42    bouyer #include <sys/cpu.h>
     79   1.2    bouyer 
     80   1.2    bouyer #include <uvm/uvm.h>
     81   1.2    bouyer 
     82  1.42    bouyer #include <x86/pmap.h>
     83   1.2    bouyer #include <machine/gdt.h>
     84   1.2    bouyer #include <xen/xenfunc.h>
     85   1.2    bouyer 
     86   1.2    bouyer #include <dev/isa/isareg.h>
     87   1.2    bouyer #include <machine/isa_machdep.h>
     88   1.2    bouyer 
     89   1.2    bouyer #undef	XENDEBUG
     90   1.2    bouyer /* #define XENDEBUG_SYNC */
     91   1.2    bouyer 
     92   1.2    bouyer #ifdef XENDEBUG
     93   1.2    bouyer #define	XENPRINTF(x) printf x
     94   1.2    bouyer #define	XENPRINTK2(x) /* printk x */
     95   1.2    bouyer static char XBUF[256];
     96   1.2    bouyer #else
     97   1.2    bouyer #define	XENPRINTF(x)
     98   1.2    bouyer #define	XENPRINTK2(x)
     99   1.2    bouyer #endif
    100   1.2    bouyer 
    101   1.2    bouyer volatile shared_info_t *HYPERVISOR_shared_info;
    102  1.11       jym /* Xen requires the start_info struct to be page aligned */
    103  1.11       jym union start_info_union start_info_union __aligned(PAGE_SIZE);
    104   1.6    bouyer unsigned long *xpmap_phys_to_machine_mapping;
    105  1.37    cherry kmutex_t pte_lock;
    106   1.2    bouyer 
    107   1.2    bouyer void xen_failsafe_handler(void);
    108   1.2    bouyer 
    109   1.2    bouyer #define HYPERVISOR_mmu_update_self(req, count, success_count) \
    110   1.2    bouyer 	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
    111   1.2    bouyer 
    112  1.64      maxv extern volatile struct xencons_interface *xencons_interface; /* XXX */
    113  1.64      maxv extern struct xenstore_domain_interface *xenstore_interface; /* XXX */
    114  1.64      maxv 
    115  1.64      maxv static void xen_bt_set_readonly(vaddr_t);
    116  1.66      maxv static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);
    117  1.64      maxv 
    118  1.65      maxv vaddr_t xen_locore(void);
    119  1.64      maxv 
    120  1.48    bouyer /*
    121  1.48    bouyer  * kcpuset internally uses an array of uint32_t while xen uses an array of
    122  1.48    bouyer  * u_long. As we're little-endian we can cast one to the other.
    123  1.48    bouyer  */
    124  1.48    bouyer typedef union {
    125  1.48    bouyer #ifdef _LP64
    126  1.48    bouyer 	uint32_t xcpum_km[2];
    127  1.48    bouyer #else
    128  1.48    bouyer 	uint32_t xcpum_km[1];
    129  1.64      maxv #endif
    130  1.64      maxv 	u_long xcpum_xm;
    131  1.48    bouyer } xcpumask_t;
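/*
 * Illustrative usage sketch (this is what xen_mcast_invlpg() and
 * xen_mcast_tlbflush() below actually do): export the kcpuset into the
 * uint32_t view, then hand Xen the address of the overlaid u_long:
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 */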
    132  1.48    bouyer 
    133   1.2    bouyer void
    134   1.2    bouyer xen_failsafe_handler(void)
    135   1.2    bouyer {
    136   1.2    bouyer 
    137   1.2    bouyer 	panic("xen_failsafe_handler called!\n");
    138   1.2    bouyer }
    139   1.2    bouyer 
    140   1.2    bouyer void
    141   1.2    bouyer xen_set_ldt(vaddr_t base, uint32_t entries)
    142   1.2    bouyer {
    143   1.2    bouyer 	vaddr_t va;
    144   1.2    bouyer 	vaddr_t end;
    145   1.4    bouyer 	pt_entry_t *ptp;
    146   1.2    bouyer 	int s;
    147   1.2    bouyer 
    148   1.2    bouyer #ifdef __x86_64__
    149   1.2    bouyer 	end = base + (entries << 3);
    150   1.2    bouyer #else
    151   1.2    bouyer 	end = base + entries * sizeof(union descriptor);
    152   1.2    bouyer #endif
    153   1.2    bouyer 
    154   1.2    bouyer 	for (va = base; va < end; va += PAGE_SIZE) {
    155   1.2    bouyer 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
    156   1.2    bouyer 		ptp = kvtopte(va);
    157  1.19       jym 		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
    158  1.19       jym 		    base, entries, ptp));
    159   1.4    bouyer 		pmap_pte_clearbits(ptp, PG_RW);
    160   1.2    bouyer 	}
    161   1.2    bouyer 	s = splvm();
    162   1.2    bouyer 	xpq_queue_set_ldt(base, entries);
    163   1.2    bouyer 	splx(s);
    164   1.2    bouyer }
    165   1.2    bouyer 
    166   1.2    bouyer #ifdef XENDEBUG
    167   1.2    bouyer void xpq_debug_dump(void);
    168   1.2    bouyer #endif
    169   1.2    bouyer 
    170   1.2    bouyer #define XPQUEUE_SIZE 2048
    171  1.35    cherry static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
    172  1.35    cherry static int xpq_idx_array[MAXCPUS];
    173  1.30    cherry 
    174  1.41    cherry #ifdef i386
    175  1.41    cherry extern union descriptor tmpgdt[];
    176  1.66      maxv #endif
    177  1.66      maxv 
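/*
 * MMU updates are batched per CPU: the xpq_queue_*() helpers below append
 * mmu_update_t entries to xpq_queue_array[ci_cpuid], and xpq_flush_queue()
 * hands the whole batch to Xen in a single HYPERVISOR_mmu_update()
 * hypercall. The usual caller pattern is therefore (sketch only):
 *
 *	xpq_queue_pte_update(pte_ma, npte);	<- queued, no hypercall yet
 *	... more queued updates ...
 *	xpq_flush_queue();			<- one hypercall for the batch
 *
 * The queue is also flushed automatically when it fills up (XPQUEUE_SIZE
 * entries), and before any of the MMUEXT operations below.
 */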
    178   1.2    bouyer void
    179  1.35    cherry xpq_flush_queue(void)
    180  1.30    cherry {
    181  1.35    cherry 	int i, ok = 0, ret;
    182  1.30    cherry 
    183  1.41    cherry 	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
    184  1.41    cherry 	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];
    185   1.2    bouyer 
    186   1.2    bouyer 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
    187   1.2    bouyer 	for (i = 0; i < xpq_idx; i++)
    188  1.19       jym 		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
    189  1.19       jym 		    xpq_queue[i].ptr, xpq_queue[i].val));
    190  1.23       jym 
    191  1.35    cherry retry:
    192  1.23       jym 	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
    193  1.23       jym 
    194  1.23       jym 	if (xpq_idx != 0 && ret < 0) {
    195  1.39    bouyer 		struct cpu_info *ci;
    196  1.39    bouyer 		CPU_INFO_ITERATOR cii;
    197  1.39    bouyer 
    198  1.39    bouyer 		printf("xpq_flush_queue: %d entries (%d successful) on "
    199  1.39    bouyer 		    "cpu%d (%ld)\n",
    200  1.41    cherry 		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);
    201  1.35    cherry 
    202  1.35    cherry 		if (ok != 0) {
    203  1.35    cherry 			xpq_queue += ok;
    204  1.35    cherry 			xpq_idx -= ok;
    205  1.35    cherry 			ok = 0;
    206  1.35    cherry 			goto retry;
    207  1.35    cherry 		}
    208  1.35    cherry 
    209  1.39    bouyer 		for (CPU_INFO_FOREACH(cii, ci)) {
    210  1.39    bouyer 			xpq_queue = xpq_queue_array[ci->ci_cpuid];
    211  1.39    bouyer 			xpq_idx = xpq_idx_array[ci->ci_cpuid];
    212  1.39    bouyer 			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
    213  1.39    bouyer 			for (i = 0; i < xpq_idx; i++) {
    214  1.39    bouyer 				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
    215  1.39    bouyer 				   xpq_queue[i].ptr, xpq_queue[i].val);
    216  1.39    bouyer 			}
    217  1.39    bouyer #ifdef __x86_64__
    218  1.39    bouyer 			for (i = 0; i < PDIR_SLOT_PTE; i++) {
    219  1.39    bouyer 				if (ci->ci_kpm_pdir[i] == 0)
    220  1.39    bouyer 					continue;
    221  1.39    bouyer 				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
    222  1.39    bouyer 				    i, ci->ci_kpm_pdir[i]);
    223  1.39    bouyer 			}
    224  1.39    bouyer #endif
    225  1.39    bouyer 		}
    226  1.23       jym 		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
    227   1.2    bouyer 	}
    228  1.41    cherry 	xpq_idx_array[curcpu()->ci_cpuid] = 0;
    229   1.2    bouyer }
    230   1.2    bouyer 
    231   1.2    bouyer static inline void
    232   1.2    bouyer xpq_increment_idx(void)
    233   1.2    bouyer {
    234   1.2    bouyer 
    235  1.41    cherry 	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
    236   1.2    bouyer 		xpq_flush_queue();
    237   1.2    bouyer }
    238   1.2    bouyer 
    239   1.2    bouyer void
    240   1.2    bouyer xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
    241   1.2    bouyer {
    242  1.35    cherry 
    243  1.41    cherry 	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
    244  1.41    cherry 	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];
    245  1.35    cherry 
    246   1.6    bouyer 	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
    247   1.6    bouyer 	    "\n", (int64_t)ma, (int64_t)pa));
    248  1.35    cherry 
    249   1.2    bouyer 	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
    250  1.45       jym 	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
    251   1.2    bouyer 	xpq_increment_idx();
    252   1.2    bouyer #ifdef XENDEBUG_SYNC
    253   1.2    bouyer 	xpq_flush_queue();
    254   1.2    bouyer #endif
    255   1.2    bouyer }
    256   1.2    bouyer 
    257   1.2    bouyer void
    258   1.6    bouyer xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
    259   1.2    bouyer {
    260   1.2    bouyer 
    261  1.41    cherry 	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
    262  1.41    cherry 	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];
    263  1.35    cherry 
    264   1.6    bouyer 	KASSERT((ptr & 3) == 0);
    265   1.2    bouyer 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
    266   1.2    bouyer 	xpq_queue[xpq_idx].val = val;
    267   1.2    bouyer 	xpq_increment_idx();
    268   1.2    bouyer #ifdef XENDEBUG_SYNC
    269   1.2    bouyer 	xpq_flush_queue();
    270   1.2    bouyer #endif
    271   1.2    bouyer }
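/*
 * Note the two flavours above: xpq_queue_machphys_update() updates Xen's
 * machine-to-physical table (MMU_MACHPHYS_UPDATE, the value is a physical
 * frame number), while xpq_queue_pte_update() writes an ordinary page
 * table entry (MMU_NORMAL_PT_UPDATE, 'ptr' is the machine address of the
 * PTE and 'val' the full MFN-based PTE value). A hypothetical caller
 * retargeting a machine frame would typically queue both and flush:
 *
 *	xpq_queue_pte_update(pte_ma, ma | PG_V | PG_RW);
 *	xpq_queue_machphys_update(ma, pa);
 *	xpq_flush_queue();
 */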
    272   1.2    bouyer 
    273   1.2    bouyer void
    274   1.2    bouyer xpq_queue_pt_switch(paddr_t pa)
    275   1.2    bouyer {
    276   1.2    bouyer 	struct mmuext_op op;
    277   1.2    bouyer 	xpq_flush_queue();
    278   1.2    bouyer 
    279   1.6    bouyer 	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
    280   1.6    bouyer 	    (int64_t)pa, (int64_t)pa));
    281   1.2    bouyer 	op.cmd = MMUEXT_NEW_BASEPTR;
    282   1.2    bouyer 	op.arg1.mfn = pa >> PAGE_SHIFT;
    283   1.2    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    284   1.2    bouyer 		panic("xpq_queue_pt_switch");
    285   1.2    bouyer }
    286   1.2    bouyer 
    287   1.2    bouyer void
    288  1.24       jym xpq_queue_pin_table(paddr_t pa, int lvl)
    289   1.2    bouyer {
    290   1.2    bouyer 	struct mmuext_op op;
    291  1.29    cherry 
    292   1.2    bouyer 	xpq_flush_queue();
    293   1.2    bouyer 
    294  1.24       jym 	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
    295  1.24       jym 	    lvl + 1, pa));
    296   1.2    bouyer 
    297   1.6    bouyer 	op.arg1.mfn = pa >> PAGE_SHIFT;
    298  1.24       jym 	op.cmd = lvl;
    299   1.6    bouyer 
    300   1.6    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    301   1.6    bouyer 		panic("xpq_queue_pin_table");
    302   1.6    bouyer }
    303   1.6    bouyer 
    304   1.2    bouyer void
    305   1.2    bouyer xpq_queue_unpin_table(paddr_t pa)
    306   1.2    bouyer {
    307   1.2    bouyer 	struct mmuext_op op;
    308  1.29    cherry 
    309   1.2    bouyer 	xpq_flush_queue();
    310   1.2    bouyer 
    311  1.24       jym 	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
    312   1.2    bouyer 	op.arg1.mfn = pa >> PAGE_SHIFT;
    313   1.2    bouyer 	op.cmd = MMUEXT_UNPIN_TABLE;
    314   1.2    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    315   1.2    bouyer 		panic("xpq_queue_unpin_table");
    316   1.2    bouyer }
    317   1.2    bouyer 
    318   1.2    bouyer void
    319   1.2    bouyer xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
    320   1.2    bouyer {
    321   1.2    bouyer 	struct mmuext_op op;
    322  1.29    cherry 
    323   1.2    bouyer 	xpq_flush_queue();
    324   1.2    bouyer 
    325   1.2    bouyer 	XENPRINTK2(("xpq_queue_set_ldt\n"));
    326   1.2    bouyer 	KASSERT(va == (va & ~PAGE_MASK));
    327   1.2    bouyer 	op.cmd = MMUEXT_SET_LDT;
    328   1.2    bouyer 	op.arg1.linear_addr = va;
    329   1.2    bouyer 	op.arg2.nr_ents = entries;
    330   1.2    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    331   1.2    bouyer 		panic("xpq_queue_set_ldt");
    332   1.2    bouyer }
    333   1.2    bouyer 
    334   1.2    bouyer void
    335   1.8    cegger xpq_queue_tlb_flush(void)
    336   1.2    bouyer {
    337   1.2    bouyer 	struct mmuext_op op;
    338  1.29    cherry 
    339   1.2    bouyer 	xpq_flush_queue();
    340   1.2    bouyer 
    341   1.2    bouyer 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
    342   1.2    bouyer 	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
    343   1.2    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    344   1.2    bouyer 		panic("xpq_queue_tlb_flush");
    345   1.2    bouyer }
    346   1.2    bouyer 
    347   1.2    bouyer void
    348   1.8    cegger xpq_flush_cache(void)
    349   1.2    bouyer {
    350  1.52   jnemeth 	int s = splvm();
    351  1.29    cherry 
    352   1.2    bouyer 	xpq_flush_queue();
    353   1.2    bouyer 
    354   1.2    bouyer 	XENPRINTK2(("xpq_queue_flush_cache\n"));
    355  1.52   jnemeth 	asm("wbinvd":::"memory");
    356  1.29    cherry 	splx(s); /* XXX: removeme */
    357   1.2    bouyer }
    358   1.2    bouyer 
    359   1.2    bouyer void
    360   1.2    bouyer xpq_queue_invlpg(vaddr_t va)
    361   1.2    bouyer {
    362   1.2    bouyer 	struct mmuext_op op;
    363   1.2    bouyer 	xpq_flush_queue();
    364   1.2    bouyer 
    365  1.19       jym 	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
    366   1.2    bouyer 	op.cmd = MMUEXT_INVLPG_LOCAL;
    367   1.2    bouyer 	op.arg1.linear_addr = (va & ~PAGE_MASK);
    368   1.2    bouyer 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    369   1.2    bouyer 		panic("xpq_queue_invlpg");
    370   1.2    bouyer }
    371   1.2    bouyer 
    372  1.29    cherry void
    373  1.43     rmind xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
    374  1.29    cherry {
    375  1.48    bouyer 	xcpumask_t xcpumask;
    376  1.29    cherry 	mmuext_op_t op;
    377  1.29    cherry 
    378  1.49     rmind 	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
    379  1.44     rmind 
    380  1.29    cherry 	/* Flush pending page updates */
    381  1.29    cherry 	xpq_flush_queue();
    382  1.29    cherry 
    383  1.29    cherry 	op.cmd = MMUEXT_INVLPG_MULTI;
    384  1.29    cherry 	op.arg1.linear_addr = va;
    385  1.48    bouyer 	op.arg2.vcpumask = &xcpumask.xcpum_xm;
    386  1.29    cherry 
    387  1.29    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
     388  1.29    cherry 		panic("xen_mcast_invlpg");
    389  1.29    cherry 	}
    390  1.29    cherry 
    391  1.29    cherry 	return;
    392  1.29    cherry }
    393  1.29    cherry 
    394  1.29    cherry void
    395  1.29    cherry xen_bcast_invlpg(vaddr_t va)
    396  1.29    cherry {
    397  1.29    cherry 	mmuext_op_t op;
    398  1.29    cherry 
    399  1.29    cherry 	/* Flush pending page updates */
    400  1.29    cherry 	xpq_flush_queue();
    401  1.29    cherry 
    402  1.29    cherry 	op.cmd = MMUEXT_INVLPG_ALL;
    403  1.29    cherry 	op.arg1.linear_addr = va;
    404  1.29    cherry 
    405  1.29    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
     406  1.29    cherry 		panic("xen_bcast_invlpg");
    407  1.29    cherry 	}
    408  1.29    cherry 
    409  1.29    cherry 	return;
    410  1.29    cherry }
    411  1.29    cherry 
    412  1.29    cherry /* This is a synchronous call. */
    413  1.29    cherry void
    414  1.43     rmind xen_mcast_tlbflush(kcpuset_t *kc)
    415  1.29    cherry {
    416  1.48    bouyer 	xcpumask_t xcpumask;
    417  1.29    cherry 	mmuext_op_t op;
    418  1.29    cherry 
    419  1.49     rmind 	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
    420  1.44     rmind 
    421  1.29    cherry 	/* Flush pending page updates */
    422  1.29    cherry 	xpq_flush_queue();
    423  1.29    cherry 
    424  1.29    cherry 	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
    425  1.48    bouyer 	op.arg2.vcpumask = &xcpumask.xcpum_xm;
    426  1.29    cherry 
    427  1.29    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
     428  1.29    cherry 		panic("xen_mcast_tlbflush");
    429  1.29    cherry 	}
    430  1.29    cherry 
    431  1.29    cherry 	return;
    432  1.29    cherry }
    433  1.29    cherry 
    434  1.29    cherry /* This is a synchronous call. */
    435  1.29    cherry void
    436  1.29    cherry xen_bcast_tlbflush(void)
    437  1.29    cherry {
    438  1.29    cherry 	mmuext_op_t op;
    439  1.29    cherry 
    440  1.29    cherry 	/* Flush pending page updates */
    441  1.29    cherry 	xpq_flush_queue();
    442  1.29    cherry 
    443  1.29    cherry 	op.cmd = MMUEXT_TLB_FLUSH_ALL;
    444  1.29    cherry 
    445  1.29    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
     446  1.29    cherry 		panic("xen_bcast_tlbflush");
    447  1.29    cherry 	}
    448  1.29    cherry 
    449  1.29    cherry 	return;
    450  1.29    cherry }
    451  1.29    cherry 
    452  1.29    cherry /* This is a synchronous call. */
    453  1.29    cherry void
    454  1.43     rmind xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
    455  1.29    cherry {
    456  1.29    cherry 	KASSERT(eva > sva);
    457  1.29    cherry 
    458  1.29    cherry 	/* Flush pending page updates */
    459  1.29    cherry 	xpq_flush_queue();
    460  1.29    cherry 
     461  1.29    cherry 	/* Round down to page boundaries */
    462  1.29    cherry 	sva &= ~PAGE_MASK;
    463  1.29    cherry 	eva &= ~PAGE_MASK;
    464  1.29    cherry 
    465  1.29    cherry 	for ( ; sva <= eva; sva += PAGE_SIZE) {
    466  1.43     rmind 		xen_mcast_invlpg(sva, kc);
    467  1.29    cherry 	}
    468  1.29    cherry 
    469  1.29    cherry 	return;
    470  1.29    cherry }
    471  1.29    cherry 
    472  1.29    cherry /* This is a synchronous call. */
    473  1.29    cherry void
    474  1.29    cherry xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
    475  1.29    cherry {
    476  1.29    cherry 	KASSERT(eva > sva);
    477  1.29    cherry 
    478  1.29    cherry 	/* Flush pending page updates */
    479  1.29    cherry 	xpq_flush_queue();
    480  1.29    cherry 
     481  1.29    cherry 	/* Round down to page boundaries */
    482  1.29    cherry 	sva &= ~PAGE_MASK;
    483  1.29    cherry 	eva &= ~PAGE_MASK;
    484  1.29    cherry 
    485  1.29    cherry 	for ( ; sva <= eva; sva += PAGE_SIZE) {
    486  1.29    cherry 		xen_bcast_invlpg(sva);
    487  1.29    cherry 	}
    488  1.29    cherry 
    489  1.29    cherry 	return;
    490  1.29    cherry }
    491  1.29    cherry 
    492  1.53    cherry /* Copy a page */
    493  1.53    cherry void
    494  1.53    cherry xen_copy_page(paddr_t srcpa, paddr_t dstpa)
    495  1.53    cherry {
    496  1.53    cherry 	mmuext_op_t op;
    497  1.53    cherry 
    498  1.53    cherry 	op.cmd = MMUEXT_COPY_PAGE;
    499  1.53    cherry 	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
    500  1.53    cherry 	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;
    501  1.53    cherry 
    502  1.53    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
    503  1.53    cherry 		panic(__func__);
    504  1.53    cherry 	}
    505  1.53    cherry }
    506  1.53    cherry 
    507  1.53    cherry /* Zero a physical page */
    508  1.53    cherry void
    509  1.53    cherry xen_pagezero(paddr_t pa)
    510  1.53    cherry {
    511  1.53    cherry 	mmuext_op_t op;
    512  1.53    cherry 
    513  1.53    cherry 	op.cmd = MMUEXT_CLEAR_PAGE;
    514  1.53    cherry 	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
    515  1.53    cherry 
    516  1.53    cherry 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
    517  1.53    cherry 		panic(__func__);
    518  1.53    cherry 	}
    519  1.53    cherry }
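/*
 * xen_copy_page() and xen_pagezero() rely on the MMUEXT_COPY_PAGE and
 * MMUEXT_CLEAR_PAGE operations: the hypervisor works on the machine frames
 * directly, so the kernel needs no temporary VA mapping of the pages. Both
 * take physical addresses and convert them to MFNs via xpmap_ptom().
 */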
    520  1.53    cherry 
    521   1.2    bouyer int
    522   1.6    bouyer xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
    523   1.2    bouyer {
    524   1.2    bouyer 	mmu_update_t op;
    525   1.2    bouyer 	int ok;
    526  1.29    cherry 
    527   1.2    bouyer 	xpq_flush_queue();
    528   1.2    bouyer 
    529   1.6    bouyer 	op.ptr = ptr;
    530   1.2    bouyer 	op.val = val;
    531   1.2    bouyer 	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
    532   1.2    bouyer 		return EFAULT;
    533   1.2    bouyer 	return (0);
    534   1.2    bouyer }
    535   1.2    bouyer 
    536   1.2    bouyer #ifdef XENDEBUG
    537   1.2    bouyer void
    538   1.8    cegger xpq_debug_dump(void)
    539   1.2    bouyer {
    540   1.2    bouyer 	int i;
    541   1.2    bouyer 
    542  1.41    cherry 	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
    543  1.41    cherry 	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];
    544  1.35    cherry 
    545   1.2    bouyer 	XENPRINTK2(("idx: %d\n", xpq_idx));
    546   1.2    bouyer 	for (i = 0; i < xpq_idx; i++) {
    547  1.13    cegger 		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
    548  1.19       jym 		    xpq_queue[i].ptr, xpq_queue[i].val);
    549   1.2    bouyer 		if (++i < xpq_idx)
    550  1.13    cegger 			snprintf(XBUF + strlen(XBUF),
    551  1.13    cegger 			    sizeof(XBUF) - strlen(XBUF),
    552  1.13    cegger 			    "%" PRIx64 " %08" PRIx64,
    553  1.19       jym 			    xpq_queue[i].ptr, xpq_queue[i].val);
    554   1.2    bouyer 		if (++i < xpq_idx)
    555  1.13    cegger 			snprintf(XBUF + strlen(XBUF),
    556  1.13    cegger 			    sizeof(XBUF) - strlen(XBUF),
    557  1.13    cegger 			    "%" PRIx64 " %08" PRIx64,
    558  1.19       jym 			    xpq_queue[i].ptr, xpq_queue[i].val);
    559   1.2    bouyer 		if (++i < xpq_idx)
    560  1.13    cegger 			snprintf(XBUF + strlen(XBUF),
    561  1.13    cegger 			    sizeof(XBUF) - strlen(XBUF),
    562  1.13    cegger 			    "%" PRIx64 " %08" PRIx64,
    563  1.19       jym 			    xpq_queue[i].ptr, xpq_queue[i].val);
    564   1.2    bouyer 		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
    565   1.2    bouyer 	}
    566   1.2    bouyer }
    567   1.2    bouyer #endif
    568   1.2    bouyer 
    569   1.2    bouyer 
    570   1.2    bouyer #if L2_SLOT_KERNBASE > 0
    571   1.2    bouyer #define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
    572   1.2    bouyer #else
    573   1.2    bouyer #define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
    574   1.2    bouyer #endif
    575   1.2    bouyer 
    576   1.6    bouyer #ifdef PAE
    577   1.6    bouyer /*
    578  1.66      maxv  * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
    579  1.64      maxv  * them mapped by the L3 page. We also need a shadow page for L3[3].
    580   1.6    bouyer  */
    581   1.6    bouyer static const int l2_4_count = 6;
    582  1.36    cherry #elif defined(__x86_64__)
    583  1.36    cherry static const int l2_4_count = PTP_LEVELS;
    584   1.6    bouyer #else
    585   1.6    bouyer static const int l2_4_count = PTP_LEVELS - 1;
    586   1.6    bouyer #endif
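/*
 * l2_4_count is the number of upper-level table pages that
 * xen_bootstrap_tables() allocates in addition to the L1 (PTE) pages:
 *   PAE:   6 (the L3 page, 4 contiguous L2 pages, plus the L2 shadow)
 *   amd64: 4 (per-CPU L4, pmap_kernel() shadow L4, L3, L2)
 *   i386:  1 (the single L2 page, which is also the PGD)
 */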
    587   1.6    bouyer 
    588  1.64      maxv /*
    589  1.64      maxv  * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
    590  1.64      maxv  * tables.
    591  1.64      maxv  */
    592   1.2    bouyer vaddr_t
    593  1.65      maxv xen_locore(void)
    594   1.2    bouyer {
    595  1.64      maxv 	size_t count, oldcount, mapsize;
    596   1.2    bouyer 	vaddr_t bootstrap_tables, init_tables;
    597   1.2    bouyer 
    598  1.54    bouyer 	xen_init_features();
    599  1.54    bouyer 
    600  1.55      maxv 	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));
    601  1.35    cherry 
    602   1.6    bouyer 	xpmap_phys_to_machine_mapping =
    603   1.6    bouyer 	    (unsigned long *)xen_start_info.mfn_list;
    604   1.2    bouyer 
     605   1.2    bouyer 	/* Space after Xen bootstrap tables should be free */
    606  1.64      maxv 	init_tables = xen_start_info.pt_base;
    607  1.64      maxv 	bootstrap_tables = init_tables +
    608  1.64      maxv 	    (xen_start_info.nr_pt_frames * PAGE_SIZE);
    609   1.2    bouyer 
    610   1.4    bouyer 	/*
    611  1.64      maxv 	 * Calculate how much space we need. First, everything mapped before
    612  1.64      maxv 	 * the Xen bootstrap tables.
    613   1.4    bouyer 	 */
    614   1.4    bouyer 	mapsize = init_tables - KERNTEXTOFF;
    615   1.4    bouyer 	/* after the tables we'll have:
    616   1.4    bouyer 	 *  - UAREA
    617   1.4    bouyer 	 *  - dummy user PGD (x86_64)
    618   1.4    bouyer 	 *  - HYPERVISOR_shared_info
    619  1.40    bouyer 	 *  - early_zerop
    620   1.4    bouyer 	 *  - ISA I/O mem (if needed)
    621   1.4    bouyer 	 */
    622  1.55      maxv 	mapsize += UPAGES * PAGE_SIZE;
    623   1.4    bouyer #ifdef __x86_64__
    624  1.55      maxv 	mapsize += PAGE_SIZE;
    625   1.4    bouyer #endif
    626  1.55      maxv 	mapsize += PAGE_SIZE;
    627  1.55      maxv 	mapsize += PAGE_SIZE;
    628   1.2    bouyer #ifdef DOM0OPS
    629  1.10    cegger 	if (xendomain_is_dom0()) {
    630   1.4    bouyer 		mapsize += IOM_SIZE;
    631   1.4    bouyer 	}
    632   1.4    bouyer #endif
    633   1.4    bouyer 
    634  1.64      maxv 	/*
    635  1.64      maxv 	 * At this point, mapsize doesn't include the table size.
    636  1.64      maxv 	 */
    637   1.4    bouyer #ifdef __x86_64__
    638   1.4    bouyer 	count = TABLE_L2_ENTRIES;
    639   1.4    bouyer #else
    640  1.66      maxv 	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
    641  1.64      maxv #endif
    642  1.64      maxv 
    643  1.64      maxv 	/*
    644  1.64      maxv 	 * Now compute how many L2 pages we need exactly. This is useful only
    645  1.64      maxv 	 * on i386, since the initial count for amd64 is already enough.
    646  1.64      maxv 	 */
    647  1.66      maxv 	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
    648  1.66      maxv 	    KERNBASE + (count << L2_SHIFT)) {
    649   1.4    bouyer 		count++;
    650   1.2    bouyer 	}
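	/*
	 * Rationale: 'count' pages of PTEs map (count << L2_SHIFT) bytes of
	 * VA starting at KERNBASE (right-hand side), and that range must
	 * cover everything up to and including the (count + l2_4_count)
	 * table pages themselves (left-hand side). Adding a page extends
	 * the mappable range but also grows what must be mapped, hence the
	 * loop instead of a closed-form computation.
	 */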
    651  1.64      maxv 
    652   1.4    bouyer #ifndef __x86_64__
    653   1.5    bouyer 	/*
    654  1.64      maxv 	 * One more L2 page: we'll allocate several pages after kva_start
    655   1.5    bouyer 	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
    656   1.5    bouyer 	 * counted here. It's not a big issue to allocate one more L2 as
    657   1.5    bouyer 	 * pmap_growkernel() will be called anyway.
    658   1.5    bouyer 	 */
    659   1.5    bouyer 	count++;
    660   1.4    bouyer 	nkptp[1] = count;
    661   1.2    bouyer #endif
    662   1.2    bouyer 
    663   1.4    bouyer 	/*
     664  1.64      maxv 	 * Install the bootstrap tables. They may need more L2 pages than the
     665  1.64      maxv 	 * final tables do, since they are installed after the final tables.
    666   1.4    bouyer 	 */
    667   1.4    bouyer 	oldcount = count;
    668   1.4    bouyer 
    669   1.4    bouyer bootstrap_again:
    670  1.64      maxv 
    671   1.2    bouyer 	/*
     672   1.2    bouyer 	 * The Xen space we'll reclaim may not be enough for our new page
     673  1.64      maxv 	 * tables, so move the bootstrap tables further up if necessary.
    674   1.2    bouyer 	 */
    675   1.4    bouyer 	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
    676   1.2    bouyer 		bootstrap_tables = init_tables +
    677  1.64      maxv 		    ((count + l2_4_count) * PAGE_SIZE);
    678  1.64      maxv 
    679  1.66      maxv 	/*
    680  1.66      maxv 	 * Make sure the number of L2 pages we have is enough to map everything
    681  1.66      maxv 	 * from KERNBASE to the bootstrap tables themselves.
    682  1.66      maxv 	 */
    683   1.4    bouyer 	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
    684  1.66      maxv 	    KERNBASE + (oldcount << L2_SHIFT)) {
    685   1.4    bouyer 		oldcount++;
    686   1.4    bouyer 		goto bootstrap_again;
    687   1.4    bouyer 	}
    688   1.2    bouyer 
    689   1.2    bouyer 	/* Create temporary tables */
    690  1.64      maxv 	xen_bootstrap_tables(init_tables, bootstrap_tables,
    691  1.66      maxv 	    xen_start_info.nr_pt_frames, oldcount, false);
    692   1.2    bouyer 
    693   1.2    bouyer 	/* Create final tables */
    694   1.2    bouyer 	xen_bootstrap_tables(bootstrap_tables, init_tables,
    695  1.66      maxv 	    oldcount + l2_4_count, count, true);
    696   1.2    bouyer 
    697  1.64      maxv 	/* Zero out free space after tables */
    698   1.4    bouyer 	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
    699  1.55      maxv 	    (UPAGES + 1) * PAGE_SIZE);
    700  1.28     rmind 
    701  1.28     rmind 	/* Finally, flush TLB. */
    702  1.28     rmind 	xpq_queue_tlb_flush();
    703  1.28     rmind 
    704   1.4    bouyer 	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
    705   1.2    bouyer }
    706   1.2    bouyer 
    707   1.2    bouyer /*
     708  1.55      maxv  * Build a new set of page tables and switch to them.
    709  1.55      maxv  * old_count is # of old tables (including PGD, PDTPE and PDE).
    710  1.55      maxv  * new_count is # of new tables (PTE only).
    711  1.55      maxv  * We assume the areas don't overlap.
    712   1.2    bouyer  */
    713   1.2    bouyer static void
    714  1.64      maxv xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    715  1.66      maxv     size_t new_count, bool final)
    716   1.2    bouyer {
    717   1.2    bouyer 	pd_entry_t *pdtpe, *pde, *pte;
    718  1.50       mrg 	pd_entry_t *bt_pgd;
    719   1.6    bouyer 	paddr_t addr;
    720  1.61    bouyer 	vaddr_t page, avail, map_end;
    721   1.2    bouyer 	int i;
    722  1.61    bouyer 	extern char __rodata_start;
    723   1.2    bouyer 	extern char __data_start;
    724  1.61    bouyer 	extern char __kernel_end;
    725  1.40    bouyer 	extern char *early_zerop; /* from pmap.c */
    726  1.61    bouyer 	pt_entry_t pg_nx;
    727  1.61    bouyer 	u_int descs[4];
    728   1.2    bouyer 
    729  1.61    bouyer 	/*
    730  1.61    bouyer 	 * Set the NX/XD bit, if available. descs[3] = %edx.
    731  1.61    bouyer 	 */
    732  1.61    bouyer 	x86_cpuid(0x80000001, descs);
    733  1.61    bouyer 	pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;
    734  1.55      maxv 
    735   1.2    bouyer 	/*
    736  1.66      maxv 	 * Layout of RW area after the kernel image:
    737  1.55      maxv 	 *     xencons_interface (if present)
    738  1.55      maxv 	 *     xenstore_interface (if present)
    739  1.55      maxv 	 *     table pages (new_count + l2_4_count entries)
    740  1.64      maxv 	 * Extra mappings (only when final is true):
    741  1.55      maxv 	 *     UAREA
    742  1.64      maxv 	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
    743  1.55      maxv 	 *     HYPERVISOR_shared_info
    744  1.55      maxv 	 *     early_zerop
    745  1.55      maxv 	 *     ISA I/O mem (if needed)
    746   1.2    bouyer 	 */
    747  1.55      maxv 	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
    748   1.2    bouyer 	if (final) {
    749  1.55      maxv 		map_end += (UPAGES + 1) * PAGE_SIZE;
    750   1.4    bouyer 		HYPERVISOR_shared_info = (shared_info_t *)map_end;
    751  1.55      maxv 		map_end += PAGE_SIZE;
    752  1.40    bouyer 		early_zerop = (char *)map_end;
    753  1.55      maxv 		map_end += PAGE_SIZE;
    754   1.2    bouyer 	}
    755  1.55      maxv 
    756   1.4    bouyer 	/*
    757  1.64      maxv 	 * We always set atdevbase, as it's used by init386 to find the first
    758   1.4    bouyer 	 * available VA. map_end is updated only if we are dom0, so
    759   1.4    bouyer 	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
    760   1.4    bouyer 	 * this case.
    761   1.4    bouyer 	 */
    762  1.66      maxv 	if (final) {
    763   1.4    bouyer 		atdevbase = map_end;
    764   1.2    bouyer #ifdef DOM0OPS
    765  1.66      maxv 		if (xendomain_is_dom0()) {
    766  1.66      maxv 			/* ISA I/O mem */
    767  1.66      maxv 			map_end += IOM_SIZE;
    768  1.66      maxv 		}
    769  1.66      maxv #endif
    770   1.2    bouyer 	}
    771   1.2    bouyer 
    772  1.61    bouyer 	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
    773  1.19       jym 	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
    774  1.19       jym 	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));
    775   1.2    bouyer 
    776   1.2    bouyer 	/*
    777  1.55      maxv 	 * Create bootstrap page tables. What we need:
    778   1.2    bouyer 	 * - a PGD (level 4)
    779   1.2    bouyer 	 * - a PDTPE (level 3)
    780  1.55      maxv 	 * - a PDE (level 2)
    781   1.2    bouyer 	 * - some PTEs (level 1)
    782   1.2    bouyer 	 */
    783   1.2    bouyer 
    784  1.55      maxv 	bt_pgd = (pd_entry_t *)new_pgd;
    785  1.55      maxv 	memset(bt_pgd, 0, PAGE_SIZE);
    786   1.2    bouyer 	avail = new_pgd + PAGE_SIZE;
    787  1.55      maxv 
    788   1.4    bouyer #if PTP_LEVELS > 3
    789  1.64      maxv 	/* Per-cpu L4 */
    790  1.36    cherry 	pd_entry_t *bt_cpu_pgd = bt_pgd;
    791  1.64      maxv 	/* pmap_kernel() "shadow" L4 */
    792  1.55      maxv 	bt_pgd = (pd_entry_t *)avail;
    793  1.36    cherry 	memset(bt_pgd, 0, PAGE_SIZE);
    794  1.36    cherry 	avail += PAGE_SIZE;
    795  1.36    cherry 
    796  1.64      maxv 	/* Install L3 */
    797  1.55      maxv 	pdtpe = (pd_entry_t *)avail;
    798  1.55      maxv 	memset(pdtpe, 0, PAGE_SIZE);
    799   1.2    bouyer 	avail += PAGE_SIZE;
    800   1.2    bouyer 
    801  1.55      maxv 	addr = ((u_long)pdtpe) - KERNBASE;
    802  1.67      maxv 	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
    803  1.66      maxv 	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
    804   1.4    bouyer #else
    805   1.4    bouyer 	pdtpe = bt_pgd;
    806  1.64      maxv #endif
    807   1.2    bouyer 
    808   1.4    bouyer #if PTP_LEVELS > 2
    809   1.2    bouyer 	/* Level 2 */
    810  1.55      maxv 	pde = (pd_entry_t *)avail;
    811   1.2    bouyer 	memset(pde, 0, PAGE_SIZE);
    812   1.2    bouyer 	avail += PAGE_SIZE;
    813   1.2    bouyer 
    814  1.55      maxv 	addr = ((u_long)pde) - KERNBASE;
    815  1.67      maxv 	pdtpe[pl3_pi(KERNTEXTOFF)] =
    816   1.6    bouyer 	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
    817   1.6    bouyer #elif defined(PAE)
     818  1.64      maxv 	/* Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
    819  1.55      maxv 	pde = (pd_entry_t *)avail;
    820   1.6    bouyer 	memset(pde, 0, PAGE_SIZE * 5);
    821   1.6    bouyer 	avail += PAGE_SIZE * 5;
    822  1.55      maxv 	addr = ((u_long)pde) - KERNBASE;
    823  1.64      maxv 
    824   1.6    bouyer 	/*
    825  1.64      maxv 	 * Enter L2 pages in L3. The real L2 kernel PD will be the last one
    826  1.64      maxv 	 * (so that pde[L2_SLOT_KERN] always points to the shadow).
    827   1.6    bouyer 	 */
    828   1.6    bouyer 	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
    829   1.6    bouyer 		/*
     830  1.64      maxv 		 * Xen doesn't want RW mappings in L3 entries; it will add
     831   1.6    bouyer 		 * them itself.
    832   1.6    bouyer 		 */
    833   1.6    bouyer 		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    834   1.6    bouyer 	}
    835   1.6    bouyer 	addr += PAGE_SIZE;
    836   1.6    bouyer 	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    837  1.64      maxv #else
    838   1.4    bouyer 	pde = bt_pgd;
    839  1.64      maxv #endif
    840   1.2    bouyer 
    841   1.2    bouyer 	/* Level 1 */
    842   1.2    bouyer 	page = KERNTEXTOFF;
    843   1.2    bouyer 	for (i = 0; i < new_count; i ++) {
    844   1.6    bouyer 		vaddr_t cur_page = page;
    845   1.2    bouyer 
    846  1.55      maxv 		pte = (pd_entry_t *)avail;
    847   1.2    bouyer 		avail += PAGE_SIZE;
    848   1.2    bouyer 
    849   1.2    bouyer 		memset(pte, 0, PAGE_SIZE);
    850  1.55      maxv 		while (pl2_pi(page) == pl2_pi(cur_page)) {
    851   1.2    bouyer 			if (page >= map_end) {
    852   1.2    bouyer 				/* not mapped at all */
    853   1.2    bouyer 				pte[pl1_pi(page)] = 0;
    854   1.2    bouyer 				page += PAGE_SIZE;
    855   1.2    bouyer 				continue;
    856   1.2    bouyer 			}
    857   1.2    bouyer 			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
    858   1.2    bouyer 			if (page == (vaddr_t)HYPERVISOR_shared_info) {
    859   1.2    bouyer 				pte[pl1_pi(page)] = xen_start_info.shared_info;
    860   1.2    bouyer 			}
    861   1.7    bouyer 			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
    862  1.12    cegger 			    == xen_start_info.console.domU.mfn) {
    863   1.2    bouyer 				xencons_interface = (void *)page;
    864  1.19       jym 				pte[pl1_pi(page)] = xen_start_info.console_mfn;
    865   1.6    bouyer 				pte[pl1_pi(page)] <<= PAGE_SHIFT;
    866   1.2    bouyer 			}
    867   1.7    bouyer 			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
    868   1.7    bouyer 			    == xen_start_info.store_mfn) {
    869   1.2    bouyer 				xenstore_interface = (void *)page;
    870   1.6    bouyer 				pte[pl1_pi(page)] = xen_start_info.store_mfn;
    871   1.6    bouyer 				pte[pl1_pi(page)] <<= PAGE_SHIFT;
    872   1.2    bouyer 			}
    873   1.2    bouyer #ifdef DOM0OPS
    874   1.2    bouyer 			if (page >= (vaddr_t)atdevbase &&
    875   1.2    bouyer 			    page < (vaddr_t)atdevbase + IOM_SIZE) {
    876   1.2    bouyer 				pte[pl1_pi(page)] =
    877   1.2    bouyer 				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
    878  1.61    bouyer 				pte[pl1_pi(page)] |= pg_nx;
    879   1.2    bouyer 			}
    880   1.2    bouyer #endif
    881  1.61    bouyer 
    882   1.4    bouyer 			pte[pl1_pi(page)] |= PG_k | PG_V;
    883  1.61    bouyer 			if (page < (vaddr_t)&__rodata_start) {
    884  1.61    bouyer 				/* Map the kernel text RX. */
    885  1.56      maxv 				pte[pl1_pi(page)] |= PG_RO;
    886  1.61    bouyer 			} else if (page >= (vaddr_t)&__rodata_start &&
    887  1.61    bouyer 			    page < (vaddr_t)&__data_start) {
    888  1.61    bouyer 				/* Map the kernel rodata R. */
    889  1.61    bouyer 				pte[pl1_pi(page)] |= PG_RO | pg_nx;
    890  1.55      maxv 			} else if (page >= old_pgd &&
    891  1.55      maxv 			    page < old_pgd + (old_count * PAGE_SIZE)) {
    892  1.61    bouyer 				/* Map the old page tables R. */
    893  1.61    bouyer 				pte[pl1_pi(page)] |= PG_RO | pg_nx;
    894   1.2    bouyer 			} else if (page >= new_pgd &&
    895   1.6    bouyer 			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
    896  1.61    bouyer 				/* Map the new page tables R. */
    897  1.61    bouyer 				pte[pl1_pi(page)] |= PG_RO | pg_nx;
    898  1.41    cherry #ifdef i386
    899  1.41    cherry 			} else if (page == (vaddr_t)tmpgdt) {
    900  1.41    cherry 				/*
    901  1.64      maxv 				 * Map bootstrap gdt R/O. Later, we will re-add
    902  1.64      maxv 				 * this page to uvm after making it writable.
    903  1.41    cherry 				 */
    904  1.41    cherry 				pte[pl1_pi(page)] = 0;
    905  1.41    cherry 				page += PAGE_SIZE;
    906  1.41    cherry 				continue;
    907  1.64      maxv #endif
    908  1.61    bouyer 			} else if (page >= (vaddr_t)&__data_start &&
    909  1.61    bouyer 			    page < (vaddr_t)&__kernel_end) {
    910  1.61    bouyer 				/* Map the kernel data+bss RW. */
    911  1.61    bouyer 				pte[pl1_pi(page)] |= PG_RW | pg_nx;
    912   1.2    bouyer 			} else {
    913  1.62      maxv 				/* Map the page RW. */
    914  1.62      maxv 				pte[pl1_pi(page)] |= PG_RW | pg_nx;
    915   1.2    bouyer 			}
    916  1.64      maxv 
    917   1.2    bouyer 			page += PAGE_SIZE;
    918   1.2    bouyer 		}
    919   1.2    bouyer 
    920  1.64      maxv 		addr = ((u_long)pte) - KERNBASE;
    921   1.2    bouyer 		pde[pl2_pi(cur_page)] =
    922   1.4    bouyer 		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
    923  1.64      maxv 
    924   1.2    bouyer 		/* Mark readonly */
    925  1.64      maxv 		xen_bt_set_readonly((vaddr_t)pte);
    926   1.2    bouyer 	}
    927   1.2    bouyer 
    928   1.2    bouyer 	/* Install recursive page tables mapping */
    929   1.6    bouyer #ifdef PAE
    930   1.6    bouyer 	/*
    931  1.64      maxv 	 * We need a shadow page for the kernel's L2 page.
    932   1.6    bouyer 	 * The real L2 kernel PD will be the last one (so that
    933  1.64      maxv 	 * pde[L2_SLOT_KERN] always points to the shadow).
    934   1.6    bouyer 	 */
    935   1.6    bouyer 	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
    936  1.36    cherry 	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
    937  1.36    cherry 	cpu_info_primary.ci_kpm_pdirpa =
    938  1.36    cherry 	    (vaddr_t) cpu_info_primary.ci_kpm_pdir - KERNBASE;
    939   1.6    bouyer 
    940   1.6    bouyer 	/*
    941  1.64      maxv 	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
    942  1.64      maxv 	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
    943  1.64      maxv 	 * have to enter the shadow after switching %cr3, or Xen will refcount
    944  1.64      maxv 	 * some PTEs with the wrong type.
    945   1.6    bouyer 	 */
    946   1.6    bouyer 	addr = (u_long)pde - KERNBASE;
    947   1.6    bouyer 	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
    948  1.63      maxv 		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V |
    949  1.63      maxv 		    pg_nx;
    950   1.6    bouyer 	}
    951   1.6    bouyer #if 0
    952   1.6    bouyer 	addr += PAGE_SIZE; /* point to shadow L2 */
    953   1.6    bouyer 	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    954   1.6    bouyer #endif
    955  1.14       jym 	/* Mark tables RO, and pin the kernel's shadow as L2 */
    956   1.6    bouyer 	addr = (u_long)pde - KERNBASE;
    957   1.6    bouyer 	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
    958   1.6    bouyer 		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
    959  1.64      maxv #if 0
    960   1.6    bouyer 		if (i == 2 || i == 3)
    961   1.6    bouyer 			continue;
    962  1.24       jym 		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
    963   1.6    bouyer #endif
    964   1.6    bouyer 	}
    965   1.6    bouyer 	if (final) {
    966   1.6    bouyer 		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
    967  1.24       jym 		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
    968   1.6    bouyer 	}
    969   1.6    bouyer #if 0
    970   1.6    bouyer 	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
    971  1.24       jym 	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
    972   1.6    bouyer #endif
    973   1.6    bouyer #else /* PAE */
    974  1.61    bouyer 
    975  1.61    bouyer 	/* Recursive entry in pmap_kernel(). */
    976  1.61    bouyer 	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
    977  1.61    bouyer 	    | PG_k | PG_RO | PG_V | pg_nx;
    978  1.36    cherry #ifdef __x86_64__
    979  1.61    bouyer 	/* Recursive entry in higher-level per-cpu PD. */
    980  1.61    bouyer 	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
    981  1.61    bouyer 	    | PG_k | PG_RO | PG_V | pg_nx;
    982  1.61    bouyer #endif
    983  1.61    bouyer 
    984   1.2    bouyer 	/* Mark tables RO */
    985  1.64      maxv 	xen_bt_set_readonly((vaddr_t)pde);
    986   1.6    bouyer #endif
    987   1.6    bouyer #if PTP_LEVELS > 2 || defined(PAE)
    988  1.64      maxv 	xen_bt_set_readonly((vaddr_t)pdtpe);
    989   1.4    bouyer #endif
    990   1.4    bouyer #if PTP_LEVELS > 3
    991   1.2    bouyer 	xen_bt_set_readonly(new_pgd);
    992   1.4    bouyer #endif
    993  1.61    bouyer 
    994   1.2    bouyer 	/* Pin the PGD */
    995  1.24       jym #ifdef __x86_64__
    996  1.24       jym 	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
    997  1.24       jym #elif PAE
    998   1.6    bouyer 	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
    999   1.6    bouyer #else
   1000  1.24       jym 	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
   1001   1.6    bouyer #endif
   1002  1.21       jym 
   1003   1.4    bouyer 	/* Save phys. addr of PDP, for libkvm. */
   1004   1.6    bouyer #ifdef PAE
   1005  1.21       jym 	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
   1006  1.21       jym #else
   1007  1.36    cherry 	PDPpaddr = (u_long)bt_pgd - KERNBASE;
   1008  1.21       jym #endif
   1009  1.21       jym 
   1010   1.2    bouyer 	/* Switch to new tables */
   1011   1.2    bouyer 	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
   1012  1.21       jym 
   1013   1.6    bouyer #ifdef PAE
   1014   1.6    bouyer 	if (final) {
   1015  1.64      maxv 		/* Save the address of the L3 page */
   1016  1.21       jym 		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
   1017  1.21       jym 		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);
   1018  1.21       jym 
   1019  1.64      maxv 		/* Now enter the kernel's PTE mappings */
   1020  1.64      maxv 		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
   1021   1.6    bouyer 		xpq_queue_pte_update(
   1022  1.64      maxv 		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
   1023   1.6    bouyer 		    xpmap_ptom_masked(addr) | PG_k | PG_V);
   1024   1.6    bouyer 		xpq_flush_queue();
   1025   1.6    bouyer 	}
   1026  1.36    cherry #elif defined(__x86_64__)
   1027  1.36    cherry 	if (final) {
   1028  1.66      maxv 		/* Save the address of the real per-cpu L4 page. */
   1029  1.36    cherry 		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
   1030  1.66      maxv 		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
   1031  1.36    cherry 	}
   1032   1.6    bouyer #endif
   1033  1.51  christos 	__USE(pdtpe);
   1034   1.6    bouyer 
   1035  1.66      maxv 	/*
   1036  1.66      maxv 	 * Now we can safely reclaim the space taken by the old tables.
   1037  1.66      maxv 	 */
   1038  1.66      maxv 
   1039   1.2    bouyer 	/* Unpin old PGD */
   1040   1.2    bouyer 	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
   1041  1.66      maxv 
   1042   1.2    bouyer 	/* Mark old tables RW */
   1043   1.2    bouyer 	page = old_pgd;
   1044  1.66      maxv 	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
   1045  1.64      maxv 	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
   1046   1.2    bouyer 	pte += pl1_pi(page);
   1047   1.2    bouyer 	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
   1048  1.66      maxv 		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
   1049   1.6    bouyer 		xpq_queue_pte_update(addr, *pte | PG_RW);
   1050   1.2    bouyer 		page += PAGE_SIZE;
   1051   1.2    bouyer 		/*
   1052  1.55      maxv 		 * Our PTEs are contiguous so it's safe to just "++" here.
   1053   1.2    bouyer 		 */
   1054   1.2    bouyer 		pte++;
   1055   1.2    bouyer 	}
   1056   1.2    bouyer 	xpq_flush_queue();
   1057   1.2    bouyer }
   1058   1.2    bouyer 
   1059   1.2    bouyer 
   1060   1.2    bouyer /*
   1061   1.2    bouyer  * Bootstrap helper functions
   1062   1.2    bouyer  */
   1063   1.2    bouyer 
   1064   1.2    bouyer /*
   1065   1.2    bouyer  * Mark a page readonly
   1066   1.2    bouyer  * XXX: assuming vaddr = paddr + KERNBASE
   1067   1.2    bouyer  */
   1068   1.2    bouyer 
   1069   1.2    bouyer static void
   1070  1.64      maxv xen_bt_set_readonly(vaddr_t page)
   1071   1.2    bouyer {
   1072   1.2    bouyer 	pt_entry_t entry;
   1073   1.2    bouyer 
   1074   1.2    bouyer 	entry = xpmap_ptom_masked(page - KERNBASE);
   1075   1.4    bouyer 	entry |= PG_k | PG_V;
   1076   1.2    bouyer 
   1077  1.64      maxv 	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
   1078   1.2    bouyer }
   1079   1.4    bouyer 
   1080   1.4    bouyer #ifdef __x86_64__
   1081   1.4    bouyer void
   1082   1.4    bouyer xen_set_user_pgd(paddr_t page)
   1083   1.4    bouyer {
   1084   1.4    bouyer 	struct mmuext_op op;
   1085   1.4    bouyer 	int s = splvm();
   1086   1.4    bouyer 
   1087   1.4    bouyer 	xpq_flush_queue();
   1088   1.4    bouyer 	op.cmd = MMUEXT_NEW_USER_BASEPTR;
   1089  1.46       jym 	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
   1090  1.64      maxv 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
   1091   1.4    bouyer 		panic("xen_set_user_pgd: failed to install new user page"
   1092  1.19       jym 			" directory %#" PRIxPADDR, page);
   1093   1.4    bouyer 	splx(s);
   1094   1.4    bouyer }
   1095   1.4    bouyer #endif /* __x86_64__ */
   1096