/*	$NetBSD: x86_xpmap.c,v 1.41 2012/02/24 08:06:08 cherry Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.41 2012/02/24 08:06:08 cherry Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x
/* On x86_64 the kernel runs in ring 3, so kernel mappings need PG_u. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;

void xen_failsafe_handler(void);

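/*
 * Batch MMU update requests against our own page tables: DOMID_SELF
 * makes the hypervisor apply the queued updates to the calling domain.
 */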
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


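/*
 * Point the LDT at 'base'. Xen requires pages backing descriptor
 * tables to be mapped read-only, so clear PG_RW on them before
 * queueing the MMUEXT_SET_LDT operation.
 */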
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

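/*
 * MMU updates are queued per-CPU and handed to the hypervisor in
 * batches: each xpq_queue_* helper appends an entry to this CPU's
 * array, and xpq_flush_queue() submits all pending entries with a
 * single HYPERVISOR_mmu_update hypercall, amortizing the cost of
 * crossing into the hypervisor.
 */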
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif /* i386 */
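
/*
 * Submit this CPU's pending updates to the hypervisor. On partial
 * success, skip the entries that went through and retry the remainder;
 * on hard failure, dump every CPU's queue (and, on amd64, the per-CPU
 * kernel PD entries) for post-mortem analysis, then panic.
 */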
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				   xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
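
/*
 * Illustrative sketch (not called anywhere): a typical caller queues
 * one or more PTE updates keyed by machine address and then forces the
 * batch out, e.g.
 *
 *	xpq_queue_pte_update(xpmap_ptom(pte_pa), npte);
 *	xpq_flush_queue();
 *
 * where 'pte_pa' and 'npte' are hypothetical names for the physical
 * address of the PTE slot and the new PTE value.
 */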

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

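/*
 * Pin a page as a page-table page. 'lvl' is expected to be one of the
 * MMUEXT_PIN_L*_TABLE command values; the xpq_queue_pin_l{2,3,4}_table()
 * callers below pass it in that form.
 */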
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		panic("xpq_flush_cache, err %d", err);
	}
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

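/*
 * The xen_{mcast,bcast}_* helpers below ask the hypervisor to
 * invalidate TLB entries on a set of vCPUs (MULTI, selected by a
 * vcpumask) or on all vCPUs (ALL), letting Xen do the cross-CPU work.
 */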
void
xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the addresses down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, cpumask);
	}
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the addresses down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}
}
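
/*
 * Illustrative sketch (not one of this file's callers): after
 * rewriting the PTEs covering a kernel VA range, the range can be shot
 * down on a set of vCPUs with
 *
 *	xen_vcpu_mcast_invlpg(sva, eva, mask);
 *
 * 'sva', 'eva' and 'mask' are hypothetical names for the range bounds
 * and the target vCPU bitmask.
 */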

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new page tables. The return value is the
 * first vaddr we can use once the Xen bootstrap tables have been
 * reclaimed.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of Xen bootstrap tables
 */

/* How many PDPs do we need? */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
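/*
 * On amd64 we need one page per level above the PTEs (L4, L3, L2) plus
 * the pmap_kernel() shadow L4 set up in xen_bootstrap_tables(), hence
 * PTP_LEVELS (4) pages.
 */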
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;	/* UAREA */
#ifdef __x86_64__
	mapsize += NBPG;		/* dummy user PGD */
#endif
	mapsize += NBPG;		/* HYPERVISOR_shared_info */
	mapsize += NBPG;		/* early_zerop */

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
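	/*
	 * The page-table pages themselves also consume kernel VA, so
	 * account for them ('count' PTE pages plus l2_4_count upper-level
	 * pages) and keep growing 'count' until the VA that 'count' L2
	 * slots can map reaches past everything mapped from KERNTEXTOFF.
	 */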
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. These may need more L2 pages
	 * than the final tables will, as they are installed above the
	 * final tables in memory.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * Xen space we'll reclaim may not be enough for our new page tables,
	 * move bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new set of page tables and switch to them.
 * old_count is # of old tables (including PGD, PDTPE and PDE)
 * new_count is # of new tables (PTE only)
 * we assume areas don't overlap
 */
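/*
 * In outline: allocate the new upper-level pages and PTE pages from the
 * area at new_pgd, map kernel text and both sets of tables read-only,
 * install the recursive PTE slot(s), pin the new PGD, switch to it with
 * xpq_queue_pt_switch(), then unpin the old PGD and make the old table
 * pages writable again so their space can be reclaimed.
 */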
static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;
	extern char *early_zerop; /* from pmap.c */

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  early_zerop
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
		early_zerop = (char *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables
	 * What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* per-cpu L4 PD */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 PD */
	bt_pgd = (pd_entry_t *) avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map the bootstrap gdt R/O. Later, we will
				 * re-add this page to uvm after making it
				 * writable.
				 */

				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif /* i386 */
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t) cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which include the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
	bt_cpu_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(PAE)
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* save the address of the real per-cpu L4 pgd page */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t) bt_cpu_pgd - KERNBASE);
	}
#endif

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		   "*pte %#" PRIxPADDR "\n",
		   addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our ptes are contiguous
		 * so it's safe to just "++" here
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

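/*
 * Install a new user-mode PGD: Xen/amd64 keeps separate kernel- and
 * user-mode base pointers, and MMUEXT_NEW_USER_BASEPTR loads the one
 * used while the guest runs in user mode.
 */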
#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */