x86_xpmap.c revision 1.3.8.5
      1  1.3.8.5  yamt /*	$NetBSD: x86_xpmap.c,v 1.3.8.5 2008/02/27 08:36:29 yamt Exp $	*/
      2  1.3.8.2  yamt 
      3  1.3.8.2  yamt /*
      4  1.3.8.2  yamt  * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
      5  1.3.8.2  yamt  *
      6  1.3.8.2  yamt  * Permission to use, copy, modify, and distribute this software for any
      7  1.3.8.2  yamt  * purpose with or without fee is hereby granted, provided that the above
      8  1.3.8.2  yamt  * copyright notice and this permission notice appear in all copies.
      9  1.3.8.2  yamt  *
     10  1.3.8.2  yamt  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     11  1.3.8.2  yamt  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     12  1.3.8.2  yamt  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     13  1.3.8.2  yamt  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     14  1.3.8.2  yamt  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     15  1.3.8.2  yamt  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     16  1.3.8.2  yamt  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     17  1.3.8.2  yamt  */
     18  1.3.8.2  yamt 
     19  1.3.8.2  yamt /*
     20  1.3.8.2  yamt  * Copyright (c) 2006, 2007 Manuel Bouyer.
     21  1.3.8.2  yamt  *
     22  1.3.8.2  yamt  * Redistribution and use in source and binary forms, with or without
     23  1.3.8.2  yamt  * modification, are permitted provided that the following conditions
     24  1.3.8.2  yamt  * are met:
     25  1.3.8.2  yamt  * 1. Redistributions of source code must retain the above copyright
     26  1.3.8.2  yamt  *    notice, this list of conditions and the following disclaimer.
     27  1.3.8.2  yamt  * 2. Redistributions in binary form must reproduce the above copyright
     28  1.3.8.2  yamt  *    notice, this list of conditions and the following disclaimer in the
     29  1.3.8.2  yamt  *    documentation and/or other materials provided with the distribution.
     30  1.3.8.2  yamt  * 3. All advertising materials mentioning features or use of this software
     31  1.3.8.2  yamt  *    must display the following acknowledgement:
     32  1.3.8.2  yamt  *	This product includes software developed by Manuel Bouyer.
     33  1.3.8.2  yamt  * 4. The name of the author may not be used to endorse or promote products
     34  1.3.8.2  yamt  *    derived from this software without specific prior written permission.
     35  1.3.8.2  yamt  *
     36  1.3.8.2  yamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     37  1.3.8.2  yamt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     38  1.3.8.2  yamt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     39  1.3.8.2  yamt  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     40  1.3.8.2  yamt  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     41  1.3.8.2  yamt  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     42  1.3.8.2  yamt  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     43  1.3.8.2  yamt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     44  1.3.8.2  yamt  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     45  1.3.8.2  yamt  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     46  1.3.8.2  yamt  *
     47  1.3.8.2  yamt  */
     48  1.3.8.2  yamt 
     49  1.3.8.2  yamt /*
     50  1.3.8.2  yamt  *
     51  1.3.8.2  yamt  * Copyright (c) 2004 Christian Limpach.
     52  1.3.8.2  yamt  * All rights reserved.
     53  1.3.8.2  yamt  *
     54  1.3.8.2  yamt  * Redistribution and use in source and binary forms, with or without
     55  1.3.8.2  yamt  * modification, are permitted provided that the following conditions
     56  1.3.8.2  yamt  * are met:
     57  1.3.8.2  yamt  * 1. Redistributions of source code must retain the above copyright
     58  1.3.8.2  yamt  *    notice, this list of conditions and the following disclaimer.
     59  1.3.8.2  yamt  * 2. Redistributions in binary form must reproduce the above copyright
     60  1.3.8.2  yamt  *    notice, this list of conditions and the following disclaimer in the
     61  1.3.8.2  yamt  *    documentation and/or other materials provided with the distribution.
     62  1.3.8.2  yamt  * 3. All advertising materials mentioning features or use of this software
     63  1.3.8.2  yamt  *    must display the following acknowledgement:
     64  1.3.8.2  yamt  *      This product includes software developed by Christian Limpach.
     65  1.3.8.2  yamt  * 4. The name of the author may not be used to endorse or promote products
     66  1.3.8.2  yamt  *    derived from this software without specific prior written permission.
     67  1.3.8.2  yamt  *
     68  1.3.8.2  yamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     69  1.3.8.2  yamt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     70  1.3.8.2  yamt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     71  1.3.8.2  yamt  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     72  1.3.8.2  yamt  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     73  1.3.8.2  yamt  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     74  1.3.8.2  yamt  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     75  1.3.8.2  yamt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     76  1.3.8.2  yamt  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     77  1.3.8.2  yamt  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     78  1.3.8.2  yamt  */
     79  1.3.8.2  yamt 
     80  1.3.8.2  yamt 
     81  1.3.8.2  yamt #include <sys/cdefs.h>
     82  1.3.8.5  yamt __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.3.8.5 2008/02/27 08:36:29 yamt Exp $");
     83  1.3.8.2  yamt 
     84  1.3.8.2  yamt #include "opt_xen.h"
     85  1.3.8.3  yamt #include "opt_ddb.h"
     86  1.3.8.3  yamt #include "ksyms.h"
     87  1.3.8.2  yamt 
     88  1.3.8.2  yamt #include <sys/param.h>
     89  1.3.8.2  yamt #include <sys/systm.h>
     90  1.3.8.2  yamt 
     91  1.3.8.2  yamt #include <uvm/uvm.h>
     92  1.3.8.2  yamt 
     93  1.3.8.2  yamt #include <machine/pmap.h>
     94  1.3.8.2  yamt #include <machine/gdt.h>
     95  1.3.8.2  yamt #include <xen/xenfunc.h>
     96  1.3.8.2  yamt 
     97  1.3.8.2  yamt #include <dev/isa/isareg.h>
     98  1.3.8.2  yamt #include <machine/isa_machdep.h>
     99  1.3.8.2  yamt 
    100  1.3.8.2  yamt #undef	XENDEBUG
    101  1.3.8.2  yamt /* #define XENDEBUG_SYNC */
    102  1.3.8.2  yamt /* #define	XENDEBUG_LOW */
    103  1.3.8.2  yamt 
    104  1.3.8.2  yamt #ifdef XENDEBUG
    105  1.3.8.2  yamt #define	XENPRINTF(x) printf x
    106  1.3.8.2  yamt #define	XENPRINTK(x) printk x
    107  1.3.8.2  yamt #define	XENPRINTK2(x) /* printk x */
    108  1.3.8.2  yamt 
    109  1.3.8.2  yamt static char XBUF[256];
    110  1.3.8.2  yamt #else
    111  1.3.8.2  yamt #define	XENPRINTF(x)
    112  1.3.8.2  yamt #define	XENPRINTK(x)
    113  1.3.8.2  yamt #define	XENPRINTK2(x)
    114  1.3.8.2  yamt #endif
    115  1.3.8.2  yamt #define	PRINTF(x) printf x
    116  1.3.8.2  yamt #define	PRINTK(x) printk x
    117  1.3.8.2  yamt 
     118  1.3.8.3  yamt /* On x86_64 a Xen PV kernel runs in ring 3, so "kernel" mappings need the user bit */
    119  1.3.8.3  yamt #ifdef __x86_64__
    120  1.3.8.3  yamt #define PG_k PG_u
    121  1.3.8.3  yamt #else
    122  1.3.8.3  yamt #define PG_k 0
    123  1.3.8.3  yamt #endif
    124  1.3.8.3  yamt 
    125  1.3.8.2  yamt volatile shared_info_t *HYPERVISOR_shared_info;
    126  1.3.8.2  yamt union start_info_union start_info_union;
    127  1.3.8.4  yamt unsigned long *xpmap_phys_to_machine_mapping;
    128  1.3.8.2  yamt 
    129  1.3.8.2  yamt void xen_failsafe_handler(void);
    130  1.3.8.2  yamt 
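/*
 * Wrapper hiding the interface difference: the Xen 3 hypercall takes an
 * explicit target domain, so HYPERVISOR_mmu_update_self() supplies
 * DOMID_SELF and lets the queue code below be written once for both.
 */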
    131  1.3.8.2  yamt #ifdef XEN3
    132  1.3.8.2  yamt #define HYPERVISOR_mmu_update_self(req, count, success_count) \
    133  1.3.8.2  yamt 	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
    134  1.3.8.2  yamt #else
    135  1.3.8.2  yamt #define HYPERVISOR_mmu_update_self(req, count, success_count) \
    136  1.3.8.2  yamt 	HYPERVISOR_mmu_update((req), (count), (success_count))
    137  1.3.8.2  yamt #endif
    138  1.3.8.2  yamt 
    139  1.3.8.2  yamt void
    140  1.3.8.2  yamt xen_failsafe_handler(void)
    141  1.3.8.2  yamt {
    142  1.3.8.2  yamt 
    143  1.3.8.2  yamt 	panic("xen_failsafe_handler called!\n");
    144  1.3.8.2  yamt }
    145  1.3.8.2  yamt 
    146  1.3.8.2  yamt 
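/*
 * Point the LDT at base/entries.  The backing pages are first made
 * read-only (Xen will not accept LDT pages that remain writable), then
 * the MMUEXT_SET_LDT request is queued and flushed at splvm().
 */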
    147  1.3.8.2  yamt void
    148  1.3.8.2  yamt xen_set_ldt(vaddr_t base, uint32_t entries)
    149  1.3.8.2  yamt {
    150  1.3.8.2  yamt 	vaddr_t va;
    151  1.3.8.2  yamt 	vaddr_t end;
    152  1.3.8.3  yamt 	pt_entry_t *ptp;
    153  1.3.8.2  yamt 	int s;
    154  1.3.8.2  yamt 
    155  1.3.8.2  yamt #ifdef __x86_64__
    156  1.3.8.2  yamt 	end = base + (entries << 3);
    157  1.3.8.2  yamt #else
    158  1.3.8.2  yamt 	end = base + entries * sizeof(union descriptor);
    159  1.3.8.2  yamt #endif
    160  1.3.8.2  yamt 
    161  1.3.8.2  yamt 	for (va = base; va < end; va += PAGE_SIZE) {
    162  1.3.8.2  yamt 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
    163  1.3.8.2  yamt 		ptp = kvtopte(va);
    164  1.3.8.3  yamt 		XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
    165  1.3.8.3  yamt 			      entries, ptp));
    166  1.3.8.3  yamt 		pmap_pte_clearbits(ptp, PG_RW);
    167  1.3.8.2  yamt 	}
    168  1.3.8.2  yamt 	s = splvm();
    169  1.3.8.2  yamt 	xpq_queue_set_ldt(base, entries);
    170  1.3.8.2  yamt 	xpq_flush_queue();
    171  1.3.8.2  yamt 	splx(s);
    172  1.3.8.2  yamt }
    173  1.3.8.2  yamt 
    174  1.3.8.2  yamt #ifdef XENDEBUG
    175  1.3.8.2  yamt void xpq_debug_dump(void);
    176  1.3.8.2  yamt #endif
    177  1.3.8.2  yamt 
    178  1.3.8.2  yamt #define XPQUEUE_SIZE 2048
    179  1.3.8.2  yamt static mmu_update_t xpq_queue[XPQUEUE_SIZE];
    180  1.3.8.2  yamt static int xpq_idx = 0;
    181  1.3.8.2  yamt 
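/*
 * MMU updates are not issued one at a time: they are accumulated in
 * xpq_queue[] and pushed to the hypervisor in a single
 * HYPERVISOR_mmu_update hypercall by xpq_flush_queue().  The queue is
 * flushed automatically when it fills up, and explicitly before any
 * operation that must see the pending updates applied.
 *
 * Minimal usage sketch (hypothetical machine addresses):
 *
 *	xpq_queue_pte_update(pte_ma, npte);	queue one or more updates
 *	xpq_queue_pte_update(pte2_ma, npte2);
 *	xpq_flush_queue();			submit them in one hypercall
 */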
    182  1.3.8.2  yamt void
    183  1.3.8.2  yamt xpq_flush_queue()
    184  1.3.8.2  yamt {
    185  1.3.8.2  yamt 	int i, ok;
    186  1.3.8.2  yamt 
    187  1.3.8.2  yamt 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
    188  1.3.8.2  yamt 	for (i = 0; i < xpq_idx; i++)
    189  1.3.8.4  yamt 		XENPRINTK2(("%d: %p %08" PRIx64 "\n", i,
    190  1.3.8.4  yamt 		    (u_int64_t)xpq_queue[i].ptr, (u_int64_t)xpq_queue[i].val));
    191  1.3.8.2  yamt 	if (xpq_idx != 0 &&
    192  1.3.8.2  yamt 	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
    193  1.3.8.2  yamt 		printf("xpq_flush_queue: %d entries \n", xpq_idx);
    194  1.3.8.2  yamt 		for (i = 0; i < xpq_idx; i++)
    195  1.3.8.2  yamt 			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
    196  1.3.8.2  yamt 			   (u_int64_t)xpq_queue[i].ptr,
    197  1.3.8.2  yamt 			   (u_int64_t)xpq_queue[i].val);
    198  1.3.8.2  yamt 		panic("HYPERVISOR_mmu_update failed\n");
    199  1.3.8.2  yamt 	}
    200  1.3.8.2  yamt 	xpq_idx = 0;
    201  1.3.8.2  yamt }
    202  1.3.8.2  yamt 
    203  1.3.8.2  yamt static inline void
    204  1.3.8.2  yamt xpq_increment_idx(void)
    205  1.3.8.2  yamt {
    206  1.3.8.2  yamt 
    207  1.3.8.2  yamt 	xpq_idx++;
    208  1.3.8.2  yamt 	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
    209  1.3.8.2  yamt 		xpq_flush_queue();
    210  1.3.8.2  yamt }
    211  1.3.8.2  yamt 
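/*
 * Queue an update of the machine->pseudo-physical translation table:
 * machine page 'ma' will map back to the pseudo-physical page 'pa'.
 */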
    212  1.3.8.2  yamt void
    213  1.3.8.2  yamt xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
    214  1.3.8.2  yamt {
    215  1.3.8.4  yamt 	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
    216  1.3.8.4  yamt 	    "\n", (int64_t)ma, (int64_t)pa));
    217  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
    218  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
    219  1.3.8.2  yamt 	xpq_increment_idx();
    220  1.3.8.2  yamt #ifdef XENDEBUG_SYNC
    221  1.3.8.2  yamt 	xpq_flush_queue();
    222  1.3.8.2  yamt #endif
    223  1.3.8.2  yamt }
    224  1.3.8.2  yamt 
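/*
 * Queue a normal PTE update: 'ptr' is the machine address of the PTE to
 * modify, 'val' its new contents.  The low bits of 'ptr' encode the
 * command, hence the alignment assertion below.
 */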
    225  1.3.8.2  yamt void
    226  1.3.8.4  yamt xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
    227  1.3.8.2  yamt {
    228  1.3.8.2  yamt 
    229  1.3.8.4  yamt 	KASSERT((ptr & 3) == 0);
    230  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
    231  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = val;
    232  1.3.8.2  yamt 	xpq_increment_idx();
    233  1.3.8.2  yamt #ifdef XENDEBUG_SYNC
    234  1.3.8.2  yamt 	xpq_flush_queue();
    235  1.3.8.2  yamt #endif
    236  1.3.8.2  yamt }
    237  1.3.8.2  yamt 
    238  1.3.8.2  yamt #ifdef XEN3
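/*
 * Under Xen 3, extended MMU operations (base pointer switch, pin/unpin,
 * LDT, TLB and cache flushes) go through HYPERVISOR_mmuext_op() directly.
 * Each one flushes the pending update queue first, so queued PTE updates
 * are applied before the extended operation takes effect.
 */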
    239  1.3.8.2  yamt void
    240  1.3.8.2  yamt xpq_queue_pt_switch(paddr_t pa)
    241  1.3.8.2  yamt {
    242  1.3.8.2  yamt 	struct mmuext_op op;
    243  1.3.8.2  yamt 	xpq_flush_queue();
    244  1.3.8.2  yamt 
    245  1.3.8.4  yamt 	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
    246  1.3.8.4  yamt 	    (int64_t)pa, (int64_t)pa));
    247  1.3.8.2  yamt 	op.cmd = MMUEXT_NEW_BASEPTR;
    248  1.3.8.2  yamt 	op.arg1.mfn = pa >> PAGE_SHIFT;
    249  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    250  1.3.8.2  yamt 		panic("xpq_queue_pt_switch");
    251  1.3.8.2  yamt }
    252  1.3.8.2  yamt 
    253  1.3.8.2  yamt void
    254  1.3.8.2  yamt xpq_queue_pin_table(paddr_t pa)
    255  1.3.8.2  yamt {
    256  1.3.8.2  yamt 	struct mmuext_op op;
    257  1.3.8.2  yamt 	xpq_flush_queue();
    258  1.3.8.2  yamt 
    259  1.3.8.4  yamt 	XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
    260  1.3.8.4  yamt 	    (int64_t)pa, (int64_t)pa));
    261  1.3.8.2  yamt 	op.arg1.mfn = pa >> PAGE_SHIFT;
    262  1.3.8.2  yamt 
    263  1.3.8.4  yamt #if defined(__x86_64__)
    264  1.3.8.2  yamt 	op.cmd = MMUEXT_PIN_L4_TABLE;
    265  1.3.8.2  yamt #else
    266  1.3.8.2  yamt 	op.cmd = MMUEXT_PIN_L2_TABLE;
    267  1.3.8.2  yamt #endif
    268  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    269  1.3.8.2  yamt 		panic("xpq_queue_pin_table");
    270  1.3.8.2  yamt }
    271  1.3.8.2  yamt 
    272  1.3.8.4  yamt #ifdef PAE
    273  1.3.8.4  yamt static void
    274  1.3.8.4  yamt xpq_queue_pin_l3_table(paddr_t pa)
    275  1.3.8.4  yamt {
    276  1.3.8.4  yamt 	struct mmuext_op op;
    277  1.3.8.4  yamt 	xpq_flush_queue();
    278  1.3.8.4  yamt 
     279  1.3.8.4  yamt 	XENPRINTK2(("xpq_queue_pin_l3_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
    280  1.3.8.4  yamt 	    (int64_t)pa, (int64_t)pa));
    281  1.3.8.4  yamt 	op.arg1.mfn = pa >> PAGE_SHIFT;
    282  1.3.8.4  yamt 
    283  1.3.8.4  yamt 	op.cmd = MMUEXT_PIN_L3_TABLE;
    284  1.3.8.4  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    285  1.3.8.4  yamt 		panic("xpq_queue_pin_table");
    286  1.3.8.4  yamt }
    287  1.3.8.4  yamt #endif
    288  1.3.8.4  yamt 
    289  1.3.8.2  yamt void
    290  1.3.8.2  yamt xpq_queue_unpin_table(paddr_t pa)
    291  1.3.8.2  yamt {
    292  1.3.8.2  yamt 	struct mmuext_op op;
    293  1.3.8.2  yamt 	xpq_flush_queue();
    294  1.3.8.2  yamt 
    295  1.3.8.4  yamt 	XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
    296  1.3.8.4  yamt 	    (int64_t)pa, (int64_t)pa));
    297  1.3.8.2  yamt 	op.arg1.mfn = pa >> PAGE_SHIFT;
    298  1.3.8.2  yamt 	op.cmd = MMUEXT_UNPIN_TABLE;
    299  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    300  1.3.8.2  yamt 		panic("xpq_queue_unpin_table");
    301  1.3.8.2  yamt }
    302  1.3.8.2  yamt 
    303  1.3.8.2  yamt void
    304  1.3.8.2  yamt xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
    305  1.3.8.2  yamt {
    306  1.3.8.2  yamt 	struct mmuext_op op;
    307  1.3.8.2  yamt 	xpq_flush_queue();
    308  1.3.8.2  yamt 
    309  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_set_ldt\n"));
    310  1.3.8.2  yamt 	KASSERT(va == (va & ~PAGE_MASK));
    311  1.3.8.2  yamt 	op.cmd = MMUEXT_SET_LDT;
    312  1.3.8.2  yamt 	op.arg1.linear_addr = va;
    313  1.3.8.2  yamt 	op.arg2.nr_ents = entries;
    314  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    315  1.3.8.2  yamt 		panic("xpq_queue_set_ldt");
    316  1.3.8.2  yamt }
    317  1.3.8.2  yamt 
    318  1.3.8.2  yamt void
    319  1.3.8.2  yamt xpq_queue_tlb_flush()
    320  1.3.8.2  yamt {
    321  1.3.8.2  yamt 	struct mmuext_op op;
    322  1.3.8.2  yamt 	xpq_flush_queue();
    323  1.3.8.2  yamt 
    324  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
    325  1.3.8.2  yamt 	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
    326  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    327  1.3.8.2  yamt 		panic("xpq_queue_tlb_flush");
    328  1.3.8.2  yamt }
    329  1.3.8.2  yamt 
    330  1.3.8.2  yamt void
    331  1.3.8.2  yamt xpq_flush_cache()
    332  1.3.8.2  yamt {
    333  1.3.8.2  yamt 	struct mmuext_op op;
    334  1.3.8.2  yamt 	int s = splvm();
    335  1.3.8.2  yamt 	xpq_flush_queue();
    336  1.3.8.2  yamt 
    337  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_flush_cache\n"));
    338  1.3.8.2  yamt 	op.cmd = MMUEXT_FLUSH_CACHE;
    339  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    340  1.3.8.2  yamt 		panic("xpq_flush_cache");
    341  1.3.8.2  yamt 	splx(s);
    342  1.3.8.2  yamt }
    343  1.3.8.2  yamt 
    344  1.3.8.2  yamt void
    345  1.3.8.2  yamt xpq_queue_invlpg(vaddr_t va)
    346  1.3.8.2  yamt {
    347  1.3.8.2  yamt 	struct mmuext_op op;
    348  1.3.8.2  yamt 	xpq_flush_queue();
    349  1.3.8.2  yamt 
    350  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
    351  1.3.8.2  yamt 	op.cmd = MMUEXT_INVLPG_LOCAL;
    352  1.3.8.2  yamt 	op.arg1.linear_addr = (va & ~PAGE_MASK);
    353  1.3.8.2  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    354  1.3.8.2  yamt 		panic("xpq_queue_invlpg");
    355  1.3.8.2  yamt }
    356  1.3.8.2  yamt 
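/*
 * Synchronously update a single PTE on behalf of domain 'dom' (typically
 * a privileged domain mapping another domain's pages).  The local queue
 * is flushed first; the update itself bypasses the queue.
 */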
    357  1.3.8.2  yamt int
    358  1.3.8.4  yamt xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
    359  1.3.8.2  yamt {
    360  1.3.8.2  yamt 	mmu_update_t op;
    361  1.3.8.2  yamt 	int ok;
    362  1.3.8.2  yamt 	xpq_flush_queue();
    363  1.3.8.2  yamt 
    364  1.3.8.4  yamt 	op.ptr = ptr;
    365  1.3.8.2  yamt 	op.val = val;
    366  1.3.8.2  yamt 	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
    367  1.3.8.2  yamt 		return EFAULT;
    368  1.3.8.2  yamt 	return (0);
    369  1.3.8.2  yamt }
    370  1.3.8.2  yamt #else /* XEN3 */
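/*
 * Pre-Xen 3 interface: there is no separate mmuext_op hypercall, so the
 * extended commands below are encoded in-band as MMU_EXTENDED_COMMAND
 * entries in the same update queue.
 */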
    371  1.3.8.2  yamt void
    372  1.3.8.2  yamt xpq_queue_pt_switch(paddr_t pa)
    373  1.3.8.2  yamt {
    374  1.3.8.2  yamt 
    375  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
    376  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    377  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
    378  1.3.8.2  yamt 	xpq_increment_idx();
    379  1.3.8.2  yamt }
    380  1.3.8.2  yamt 
    381  1.3.8.2  yamt void
    382  1.3.8.2  yamt xpq_queue_pin_table(paddr_t pa)
    383  1.3.8.2  yamt {
    384  1.3.8.2  yamt 
    385  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
    386  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    387  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
    388  1.3.8.2  yamt 	xpq_increment_idx();
    389  1.3.8.2  yamt }
    390  1.3.8.2  yamt 
    391  1.3.8.2  yamt void
    392  1.3.8.2  yamt xpq_queue_unpin_table(paddr_t pa)
    393  1.3.8.2  yamt {
    394  1.3.8.2  yamt 
    395  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
    396  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    397  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
    398  1.3.8.2  yamt 	xpq_increment_idx();
    399  1.3.8.2  yamt }
    400  1.3.8.2  yamt 
    401  1.3.8.2  yamt void
    402  1.3.8.2  yamt xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
    403  1.3.8.2  yamt {
    404  1.3.8.2  yamt 
    405  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_set_ldt\n"));
    406  1.3.8.2  yamt 	KASSERT(va == (va & ~PAGE_MASK));
    407  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
    408  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT | (entries << MMUEXT_CMD_SHIFT);
    409  1.3.8.2  yamt 	xpq_increment_idx();
    410  1.3.8.2  yamt }
    411  1.3.8.2  yamt 
    412  1.3.8.2  yamt void
    413  1.3.8.2  yamt xpq_queue_tlb_flush()
    414  1.3.8.2  yamt {
    415  1.3.8.2  yamt 
    416  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
    417  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
    418  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
    419  1.3.8.2  yamt 	xpq_increment_idx();
    420  1.3.8.2  yamt }
    421  1.3.8.2  yamt 
    422  1.3.8.2  yamt void
    423  1.3.8.2  yamt xpq_flush_cache()
    424  1.3.8.2  yamt {
    425  1.3.8.2  yamt 	int s = splvm();
    426  1.3.8.2  yamt 
    427  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_flush_cache\n"));
    428  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
    429  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
    430  1.3.8.2  yamt 	xpq_increment_idx();
    431  1.3.8.2  yamt 	xpq_flush_queue();
    432  1.3.8.2  yamt 	splx(s);
    433  1.3.8.2  yamt }
    434  1.3.8.2  yamt 
    435  1.3.8.2  yamt void
    436  1.3.8.2  yamt xpq_queue_invlpg(vaddr_t va)
    437  1.3.8.2  yamt {
    438  1.3.8.2  yamt 
    439  1.3.8.2  yamt 	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
    440  1.3.8.2  yamt 	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
    441  1.3.8.2  yamt 	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
    442  1.3.8.2  yamt 	xpq_increment_idx();
    443  1.3.8.2  yamt }
    444  1.3.8.2  yamt 
    445  1.3.8.2  yamt int
    446  1.3.8.4  yamt xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
    447  1.3.8.2  yamt {
    448  1.3.8.2  yamt 	mmu_update_t xpq_up[3];
    449  1.3.8.2  yamt 
    450  1.3.8.2  yamt 	xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
    451  1.3.8.2  yamt 	xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
    452  1.3.8.4  yamt 	xpq_up[1].ptr = ptr;
    453  1.3.8.2  yamt 	xpq_up[1].val = val;
    454  1.3.8.2  yamt 	if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
    455  1.3.8.2  yamt 		return EFAULT;
    456  1.3.8.2  yamt 	return (0);
    457  1.3.8.2  yamt }
    458  1.3.8.2  yamt #endif /* XEN3 */
    459  1.3.8.2  yamt 
    460  1.3.8.2  yamt #ifdef XENDEBUG
    461  1.3.8.2  yamt void
    462  1.3.8.2  yamt xpq_debug_dump()
    463  1.3.8.2  yamt {
    464  1.3.8.2  yamt 	int i;
    465  1.3.8.2  yamt 
    466  1.3.8.2  yamt 	XENPRINTK2(("idx: %d\n", xpq_idx));
    467  1.3.8.2  yamt 	for (i = 0; i < xpq_idx; i++) {
    468  1.3.8.4  yamt 		sprintf(XBUF, "%" PRIx64 " %08" PRIx64,
    469  1.3.8.4  yamt 		    (u_int64_t)xpq_queue[i].ptr, (u_int64_t)xpq_queue[i].val);
    470  1.3.8.2  yamt 		if (++i < xpq_idx)
    471  1.3.8.4  yamt 			sprintf(XBUF + strlen(XBUF), "%" PRIx64 " %08" PRIx64,
    472  1.3.8.4  yamt 			    (u_int64_t)xpq_queue[i].ptr, (u_int64_t)xpq_queue[i].val);
    473  1.3.8.2  yamt 		if (++i < xpq_idx)
    474  1.3.8.4  yamt 			sprintf(XBUF + strlen(XBUF), "%" PRIx64 " %08" PRIx64,
    475  1.3.8.4  yamt 			    (u_int64_t)xpq_queue[i].ptr, (u_int64_t)xpq_queue[i].val);
    476  1.3.8.2  yamt 		if (++i < xpq_idx)
    477  1.3.8.4  yamt 			sprintf(XBUF + strlen(XBUF), "%" PRIx64 " %08" PRIx64,
    478  1.3.8.4  yamt 			    (u_int64_t)xpq_queue[i].ptr, (u_int64_t)xpq_queue[i].val);
    479  1.3.8.2  yamt 		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
    480  1.3.8.2  yamt 	}
    481  1.3.8.2  yamt }
    482  1.3.8.2  yamt #endif
    483  1.3.8.2  yamt 
    484  1.3.8.2  yamt 
    485  1.3.8.2  yamt extern volatile struct xencons_interface *xencons_interface; /* XXX */
    486  1.3.8.2  yamt extern struct xenstore_domain_interface *xenstore_interface; /* XXX */
    487  1.3.8.2  yamt 
    488  1.3.8.2  yamt static void xen_bt_set_readonly (vaddr_t);
    489  1.3.8.2  yamt static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);
    490  1.3.8.2  yamt 
     491  1.3.8.2  yamt /* How many PDEs do we need? */
    492  1.3.8.2  yamt #if L2_SLOT_KERNBASE > 0
    493  1.3.8.2  yamt #define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
    494  1.3.8.2  yamt #else
    495  1.3.8.2  yamt #define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
    496  1.3.8.2  yamt #endif
    497  1.3.8.2  yamt 
    498  1.3.8.2  yamt /*
    499  1.3.8.2  yamt  * Construct and switch to new pagetables
     500  1.3.8.2  yamt  * Construct and switch to new page tables.
     501  1.3.8.2  yamt  * The value returned is the first vaddr we can use once
     502  1.3.8.2  yamt  * we get rid of the Xen bootstrap tables.
    503  1.3.8.2  yamt 
    504  1.3.8.2  yamt vaddr_t xen_pmap_bootstrap (void);
    505  1.3.8.2  yamt 
    506  1.3.8.2  yamt /*
    507  1.3.8.2  yamt  * Function to get rid of Xen bootstrap tables
    508  1.3.8.2  yamt  */
    509  1.3.8.2  yamt 
     510  1.3.8.4  yamt /* How many PDPs do we need? */
    511  1.3.8.4  yamt #ifdef PAE
    512  1.3.8.4  yamt /*
     513  1.3.8.4  yamt  * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
     514  1.3.8.4  yamt  * all of them mapped by the L3 page. We also need a shadow page
     515  1.3.8.4  yamt  * for L3[3], so the count covers the L3 page, 4 L2 pages and the shadow.
    516  1.3.8.4  yamt  */
    517  1.3.8.4  yamt static const int l2_4_count = 6;
    518  1.3.8.4  yamt #else
    519  1.3.8.4  yamt static const int l2_4_count = PTP_LEVELS - 1;
    520  1.3.8.4  yamt #endif
    521  1.3.8.4  yamt 
    522  1.3.8.2  yamt vaddr_t
    523  1.3.8.2  yamt xen_pmap_bootstrap()
    524  1.3.8.2  yamt {
    525  1.3.8.3  yamt 	int count, oldcount;
    526  1.3.8.3  yamt 	long mapsize;
    527  1.3.8.2  yamt 	vaddr_t bootstrap_tables, init_tables;
    528  1.3.8.2  yamt 
    529  1.3.8.4  yamt 	xpmap_phys_to_machine_mapping =
    530  1.3.8.4  yamt 	    (unsigned long *)xen_start_info.mfn_list;
    531  1.3.8.2  yamt 	init_tables = xen_start_info.pt_base;
     532  1.3.8.2  yamt 	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));
    533  1.3.8.2  yamt 
     534  1.3.8.2  yamt 	/* Space after Xen bootstrap tables should be free */
    535  1.3.8.2  yamt 	bootstrap_tables = xen_start_info.pt_base +
    536  1.3.8.2  yamt 		(xen_start_info.nr_pt_frames * PAGE_SIZE);
    537  1.3.8.2  yamt 
    538  1.3.8.3  yamt 	/*
     539  1.3.8.3  yamt 	 * Calculate how much space we need:
     540  1.3.8.3  yamt 	 * first, everything mapped before the Xen bootstrap tables.
    541  1.3.8.3  yamt 	 */
    542  1.3.8.3  yamt 	mapsize = init_tables - KERNTEXTOFF;
    543  1.3.8.3  yamt 	/* after the tables we'll have:
    544  1.3.8.3  yamt 	 *  - UAREA
    545  1.3.8.3  yamt 	 *  - dummy user PGD (x86_64)
    546  1.3.8.3  yamt 	 *  - HYPERVISOR_shared_info
    547  1.3.8.3  yamt 	 *  - ISA I/O mem (if needed)
    548  1.3.8.3  yamt 	 */
    549  1.3.8.3  yamt 	mapsize += UPAGES * NBPG;
    550  1.3.8.3  yamt #ifdef __x86_64__
    551  1.3.8.3  yamt 	mapsize += NBPG;
    552  1.3.8.3  yamt #endif
    553  1.3.8.3  yamt 	mapsize += NBPG;
    554  1.3.8.2  yamt 
    555  1.3.8.2  yamt #ifdef DOM0OPS
    556  1.3.8.2  yamt 	if (xen_start_info.flags & SIF_INITDOMAIN) {
    557  1.3.8.2  yamt 		/* space for ISA I/O mem */
    558  1.3.8.3  yamt 		mapsize += IOM_SIZE;
    559  1.3.8.2  yamt 	}
    560  1.3.8.2  yamt #endif
     561  1.3.8.3  yamt 	/* at this point mapsize doesn't include the table size */
    562  1.3.8.2  yamt 
    563  1.3.8.3  yamt #ifdef __x86_64__
    564  1.3.8.3  yamt 	count = TABLE_L2_ENTRIES;
    565  1.3.8.3  yamt #else
    566  1.3.8.3  yamt 	count = (mapsize + (NBPD_L2 -1)) >> L2_SHIFT;
    567  1.3.8.3  yamt #endif /* __x86_64__ */
    568  1.3.8.3  yamt 
     569  1.3.8.3  yamt 	/* now compute exactly how many L2 pages we need, counting the table pages themselves */
    570  1.3.8.3  yamt 	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
    571  1.3.8.3  yamt 	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
    572  1.3.8.3  yamt 	    ((long)count << L2_SHIFT) + KERNBASE) {
    573  1.3.8.3  yamt 		count++;
    574  1.3.8.3  yamt 	}
    575  1.3.8.3  yamt #ifndef __x86_64__
    576  1.3.8.3  yamt 	/*
     577  1.3.8.3  yamt 	 * one more L2 page: we'll allocate several pages after kva_start
    578  1.3.8.3  yamt 	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
    579  1.3.8.3  yamt 	 * counted here. It's not a big issue to allocate one more L2 as
    580  1.3.8.3  yamt 	 * pmap_growkernel() will be called anyway.
    581  1.3.8.3  yamt 	 */
    582  1.3.8.3  yamt 	count++;
    583  1.3.8.3  yamt 	nkptp[1] = count;
    584  1.3.8.3  yamt #endif
    585  1.3.8.3  yamt 
    586  1.3.8.3  yamt 	/*
     587  1.3.8.3  yamt 	 * install bootstrap pages. We may need more L2 pages than the final
     588  1.3.8.3  yamt 	 * table will have, as the bootstrap area is installed after the final table
    589  1.3.8.3  yamt 	 */
    590  1.3.8.3  yamt 	oldcount = count;
    591  1.3.8.3  yamt 
    592  1.3.8.3  yamt bootstrap_again:
    593  1.3.8.3  yamt 	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
    594  1.3.8.2  yamt 	/*
    595  1.3.8.2  yamt 	 * Xen space we'll reclaim may not be enough for our new page tables,
    596  1.3.8.2  yamt 	 * move bootstrap tables if necessary
    597  1.3.8.2  yamt 	 */
    598  1.3.8.3  yamt 	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
    599  1.3.8.2  yamt 		bootstrap_tables = init_tables +
    600  1.3.8.3  yamt 					((count + l2_4_count) * PAGE_SIZE);
    601  1.3.8.3  yamt 	/* make sure we have enough to map the bootstrap_tables */
    602  1.3.8.3  yamt 	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
    603  1.3.8.3  yamt 	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
    604  1.3.8.3  yamt 		oldcount++;
    605  1.3.8.3  yamt 		goto bootstrap_again;
    606  1.3.8.3  yamt 	}
    607  1.3.8.2  yamt 
    608  1.3.8.2  yamt 	/* Create temporary tables */
    609  1.3.8.2  yamt 	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
    610  1.3.8.3  yamt 		xen_start_info.nr_pt_frames, oldcount, 0);
    611  1.3.8.2  yamt 
    612  1.3.8.2  yamt 	/* Create final tables */
    613  1.3.8.2  yamt 	xen_bootstrap_tables(bootstrap_tables, init_tables,
    614  1.3.8.3  yamt 	    oldcount + l2_4_count, count, 1);
    615  1.3.8.2  yamt 
    616  1.3.8.3  yamt 	/* zero out free space after tables */
    617  1.3.8.3  yamt 	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
    618  1.3.8.3  yamt 	    (UPAGES + 1) * NBPG);
    619  1.3.8.3  yamt 	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
    620  1.3.8.2  yamt }
    621  1.3.8.2  yamt 
    622  1.3.8.2  yamt 
    623  1.3.8.2  yamt /*
    624  1.3.8.2  yamt  * Build a new table and switch to it
    625  1.3.8.2  yamt  * old_count is # of old tables (including PGD, PDTPE and PDE)
    626  1.3.8.2  yamt  * new_count is # of new tables (PTE only)
    627  1.3.8.2  yamt  * we assume areas don't overlap
    628  1.3.8.2  yamt  */
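/*
 * The sequence is: populate the new tables covering KERNTEXTOFF..map_end,
 * mark every table page read-only, pin the new top-level table, switch to
 * it, then unpin the old tables and make them writable again so their
 * memory can be reclaimed.
 */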
    629  1.3.8.2  yamt 
    630  1.3.8.2  yamt 
    631  1.3.8.2  yamt static void
    632  1.3.8.2  yamt xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
    633  1.3.8.2  yamt 	int old_count, int new_count, int final)
    634  1.3.8.2  yamt {
    635  1.3.8.2  yamt 	pd_entry_t *pdtpe, *pde, *pte;
    636  1.3.8.2  yamt 	pd_entry_t *cur_pgd, *bt_pgd;
    637  1.3.8.4  yamt 	paddr_t addr;
    638  1.3.8.4  yamt 	vaddr_t page, avail, text_end, map_end;
    639  1.3.8.2  yamt 	int i;
    640  1.3.8.2  yamt 	extern char __data_start;
    641  1.3.8.2  yamt 
    642  1.3.8.2  yamt 	__PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
    643  1.3.8.2  yamt 	    old_pgd, new_pgd, old_count, new_count));
    644  1.3.8.2  yamt 	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
    645  1.3.8.2  yamt 	/*
    646  1.3.8.2  yamt 	 * size of R/W area after kernel text:
    647  1.3.8.2  yamt 	 *  xencons_interface (if present)
    648  1.3.8.2  yamt 	 *  xenstore_interface (if present)
    649  1.3.8.4  yamt 	 *  table pages (new_count + l2_4_count entries)
    650  1.3.8.2  yamt 	 * extra mappings (only when final is true):
    651  1.3.8.3  yamt 	 *  UAREA
    652  1.3.8.3  yamt 	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
    653  1.3.8.2  yamt 	 *  HYPERVISOR_shared_info
    654  1.3.8.2  yamt 	 *  ISA I/O mem (if needed)
    655  1.3.8.2  yamt 	 */
    656  1.3.8.4  yamt 	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
    657  1.3.8.2  yamt 	if (final) {
    658  1.3.8.3  yamt 		map_end += (UPAGES + 1) * NBPG;
    659  1.3.8.3  yamt 		HYPERVISOR_shared_info = (shared_info_t *)map_end;
    660  1.3.8.2  yamt 		map_end += NBPG;
    661  1.3.8.2  yamt 	}
    662  1.3.8.3  yamt 	/*
    663  1.3.8.3  yamt 	 * we always set atdevbase, as it's used by init386 to find the first
    664  1.3.8.3  yamt 	 * available VA. map_end is updated only if we are dom0, so
    665  1.3.8.3  yamt 	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
    666  1.3.8.3  yamt 	 * this case.
    667  1.3.8.3  yamt 	 */
    668  1.3.8.3  yamt 	if (final)
    669  1.3.8.3  yamt 		atdevbase = map_end;
    670  1.3.8.2  yamt #ifdef DOM0OPS
    671  1.3.8.2  yamt 	if (final && (xen_start_info.flags & SIF_INITDOMAIN)) {
    672  1.3.8.2  yamt 		/* ISA I/O mem */
    673  1.3.8.2  yamt 		map_end += IOM_SIZE;
    674  1.3.8.2  yamt 	}
    675  1.3.8.2  yamt #endif /* DOM0OPS */
    676  1.3.8.2  yamt 
    677  1.3.8.2  yamt 	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
    678  1.3.8.2  yamt 	    text_end, map_end));
    679  1.3.8.5  yamt 	__PRINTK(("console 0x%lx ", xen_start_info.console_mfn));
    680  1.3.8.5  yamt 	__PRINTK(("xenstore 0x%lx\n", xen_start_info.store_mfn));
    681  1.3.8.2  yamt 
    682  1.3.8.2  yamt 	/*
    683  1.3.8.2  yamt 	 * Create bootstrap page tables
    684  1.3.8.2  yamt 	 * What we need:
    685  1.3.8.2  yamt 	 * - a PGD (level 4)
    686  1.3.8.2  yamt 	 * - a PDTPE (level 3)
    687  1.3.8.2  yamt 	 * - a PDE (level2)
    688  1.3.8.2  yamt 	 * - some PTEs (level 1)
    689  1.3.8.2  yamt 	 */
    690  1.3.8.2  yamt 
    691  1.3.8.2  yamt 	cur_pgd = (pd_entry_t *) old_pgd;
    692  1.3.8.2  yamt 	bt_pgd = (pd_entry_t *) new_pgd;
    693  1.3.8.2  yamt 	memset (bt_pgd, 0, PAGE_SIZE);
    694  1.3.8.2  yamt 	avail = new_pgd + PAGE_SIZE;
    695  1.3.8.3  yamt #if PTP_LEVELS > 3
    696  1.3.8.2  yamt 	/* Install level 3 */
    697  1.3.8.2  yamt 	pdtpe = (pd_entry_t *) avail;
    698  1.3.8.2  yamt 	memset (pdtpe, 0, PAGE_SIZE);
    699  1.3.8.2  yamt 	avail += PAGE_SIZE;
    700  1.3.8.2  yamt 
    701  1.3.8.4  yamt 	addr = ((u_long) pdtpe) - KERNBASE;
    702  1.3.8.2  yamt 	bt_pgd[pl4_pi(KERNTEXTOFF)] =
    703  1.3.8.3  yamt 	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
    704  1.3.8.2  yamt 
    705  1.3.8.4  yamt 	__PRINTK(("L3 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L4[0x%x]\n",
    706  1.3.8.4  yamt 	    pdtpe, (u_int64_t)addr, (u_int64_t)bt_pgd[pl4_pi(KERNTEXTOFF)],
    707  1.3.8.4  yamt 	    pl4_pi(KERNTEXTOFF)));
    708  1.3.8.3  yamt #else
    709  1.3.8.3  yamt 	pdtpe = bt_pgd;
    710  1.3.8.3  yamt #endif /* PTP_LEVELS > 3 */
    711  1.3.8.2  yamt 
    712  1.3.8.3  yamt #if PTP_LEVELS > 2
    713  1.3.8.2  yamt 	/* Level 2 */
    714  1.3.8.2  yamt 	pde = (pd_entry_t *) avail;
    715  1.3.8.2  yamt 	memset(pde, 0, PAGE_SIZE);
    716  1.3.8.2  yamt 	avail += PAGE_SIZE;
    717  1.3.8.2  yamt 
    718  1.3.8.4  yamt 	addr = ((u_long) pde) - KERNBASE;
    719  1.3.8.2  yamt 	pdtpe[pl3_pi(KERNTEXTOFF)] =
    720  1.3.8.4  yamt 	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
    721  1.3.8.4  yamt 	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L3[0x%x]\n",
    722  1.3.8.4  yamt 	    pde, (int64_t)addr, (int64_t)pdtpe[pl3_pi(KERNTEXTOFF)],
    723  1.3.8.4  yamt 	    pl3_pi(KERNTEXTOFF)));
    724  1.3.8.4  yamt #elif defined(PAE)
     725  1.3.8.4  yamt 	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
    726  1.3.8.4  yamt 	pde = (pd_entry_t *) avail;
    727  1.3.8.4  yamt 	memset(pde, 0, PAGE_SIZE * 5);
    728  1.3.8.4  yamt 	avail += PAGE_SIZE * 5;
    729  1.3.8.4  yamt 	addr = ((u_long) pde) - KERNBASE;
    730  1.3.8.4  yamt 	/*
    731  1.3.8.4  yamt 	 * enter L2 pages in the L3.
    732  1.3.8.4  yamt 	 * The real L2 kernel PD will be the last one (so that
     733  1.3.8.4  yamt 	 * pde[L2_SLOT_KERN] always points to the shadow).
    734  1.3.8.4  yamt 	 */
    735  1.3.8.4  yamt 	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
    736  1.3.8.4  yamt 		/*
     737  1.3.8.4  yamt 		 * Xen doesn't want R/W mappings in L3 entries, it'll add it
    738  1.3.8.4  yamt 		 * itself.
    739  1.3.8.4  yamt 		 */
    740  1.3.8.4  yamt 		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    741  1.3.8.4  yamt 		__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
    742  1.3.8.4  yamt 		    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * i,
    743  1.3.8.4  yamt 		    (int64_t)addr, (int64_t)pdtpe[i], i));
    744  1.3.8.4  yamt 	}
    745  1.3.8.4  yamt 	addr += PAGE_SIZE;
    746  1.3.8.4  yamt 	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    747  1.3.8.4  yamt 	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
    748  1.3.8.4  yamt 	    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * 4,
    749  1.3.8.4  yamt 	    (int64_t)addr, (int64_t)pdtpe[3], 3));
    750  1.3.8.4  yamt 
    751  1.3.8.4  yamt #else /* PAE */
    752  1.3.8.3  yamt 	pde = bt_pgd;
    753  1.3.8.4  yamt #endif /* PTP_LEVELS > 2 */
    754  1.3.8.2  yamt 
    755  1.3.8.2  yamt 	/* Level 1 */
    756  1.3.8.2  yamt 	page = KERNTEXTOFF;
    757  1.3.8.2  yamt 	for (i = 0; i < new_count; i ++) {
    758  1.3.8.4  yamt 		vaddr_t cur_page = page;
    759  1.3.8.2  yamt 
    760  1.3.8.2  yamt 		pte = (pd_entry_t *) avail;
    761  1.3.8.2  yamt 		avail += PAGE_SIZE;
    762  1.3.8.2  yamt 
    763  1.3.8.2  yamt 		memset(pte, 0, PAGE_SIZE);
    764  1.3.8.2  yamt 		while (pl2_pi(page) == pl2_pi (cur_page)) {
    765  1.3.8.2  yamt 			if (page >= map_end) {
    766  1.3.8.2  yamt 				/* not mapped at all */
    767  1.3.8.2  yamt 				pte[pl1_pi(page)] = 0;
    768  1.3.8.2  yamt 				page += PAGE_SIZE;
    769  1.3.8.2  yamt 				continue;
    770  1.3.8.2  yamt 			}
    771  1.3.8.2  yamt 			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
    772  1.3.8.2  yamt 			if (page == (vaddr_t)HYPERVISOR_shared_info) {
    773  1.3.8.2  yamt 				pte[pl1_pi(page)] = xen_start_info.shared_info;
    774  1.3.8.2  yamt 				__PRINTK(("HYPERVISOR_shared_info "
    775  1.3.8.4  yamt 				    "va 0x%lx pte 0x%" PRIx64 "\n",
    776  1.3.8.4  yamt 				    HYPERVISOR_shared_info, (int64_t)pte[pl1_pi(page)]));
    777  1.3.8.2  yamt 			}
    778  1.3.8.3  yamt #ifdef XEN3
    779  1.3.8.5  yamt 			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
    780  1.3.8.5  yamt 			    == xen_start_info.console_mfn) {
    781  1.3.8.2  yamt 				xencons_interface = (void *)page;
    782  1.3.8.4  yamt 				pte[pl1_pi(page)] = xen_start_info.console_mfn;
    783  1.3.8.4  yamt 				pte[pl1_pi(page)] <<= PAGE_SHIFT;
    784  1.3.8.2  yamt 				__PRINTK(("xencons_interface "
    785  1.3.8.4  yamt 				    "va 0x%lx pte 0x%" PRIx64 "\n",
    786  1.3.8.4  yamt 				    xencons_interface, (int64_t)pte[pl1_pi(page)]));
    787  1.3.8.2  yamt 			}
    788  1.3.8.5  yamt 			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
    789  1.3.8.5  yamt 			    == xen_start_info.store_mfn) {
    790  1.3.8.2  yamt 				xenstore_interface = (void *)page;
    791  1.3.8.4  yamt 				pte[pl1_pi(page)] = xen_start_info.store_mfn;
    792  1.3.8.4  yamt 				pte[pl1_pi(page)] <<= PAGE_SHIFT;
    793  1.3.8.2  yamt 				__PRINTK(("xenstore_interface "
    794  1.3.8.4  yamt 				    "va 0x%lx pte 0x%" PRIx64 "\n",
    795  1.3.8.4  yamt 				    xenstore_interface, (int64_t)pte[pl1_pi(page)]));
    796  1.3.8.2  yamt 			}
    797  1.3.8.3  yamt #endif /* XEN3 */
    798  1.3.8.2  yamt #ifdef DOM0OPS
    799  1.3.8.2  yamt 			if (page >= (vaddr_t)atdevbase &&
    800  1.3.8.2  yamt 			    page < (vaddr_t)atdevbase + IOM_SIZE) {
    801  1.3.8.2  yamt 				pte[pl1_pi(page)] =
    802  1.3.8.2  yamt 				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
    803  1.3.8.2  yamt 			}
    804  1.3.8.2  yamt #endif
    805  1.3.8.3  yamt 			pte[pl1_pi(page)] |= PG_k | PG_V;
    806  1.3.8.2  yamt 			if (page < text_end) {
    807  1.3.8.2  yamt 				/* map kernel text RO */
    808  1.3.8.2  yamt 				pte[pl1_pi(page)] |= 0;
    809  1.3.8.2  yamt 			} else if (page >= old_pgd
    810  1.3.8.2  yamt 			    && page < old_pgd + (old_count * PAGE_SIZE)) {
    811  1.3.8.2  yamt 				/* map old page tables RO */
    812  1.3.8.2  yamt 				pte[pl1_pi(page)] |= 0;
    813  1.3.8.2  yamt 			} else if (page >= new_pgd &&
    814  1.3.8.4  yamt 			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
    815  1.3.8.2  yamt 				/* map new page tables RO */
    816  1.3.8.2  yamt 				pte[pl1_pi(page)] |= 0;
    817  1.3.8.2  yamt 			} else {
    818  1.3.8.2  yamt 				/* map page RW */
    819  1.3.8.2  yamt 				pte[pl1_pi(page)] |= PG_RW;
    820  1.3.8.2  yamt 			}
    821  1.3.8.4  yamt 
    822  1.3.8.4  yamt 			if ((page  >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE)) || page >= new_pgd)
    823  1.3.8.3  yamt 				__PRINTK(("va 0x%lx pa 0x%lx "
    824  1.3.8.4  yamt 				    "entry 0x%" PRIx64 " -> L1[0x%x]\n",
    825  1.3.8.2  yamt 				    page, page - KERNBASE,
    826  1.3.8.4  yamt 				    (int64_t)pte[pl1_pi(page)], pl1_pi(page)));
    827  1.3.8.2  yamt 			page += PAGE_SIZE;
    828  1.3.8.2  yamt 		}
    829  1.3.8.2  yamt 
    830  1.3.8.4  yamt 		addr = ((u_long) pte) - KERNBASE;
    831  1.3.8.2  yamt 		pde[pl2_pi(cur_page)] =
    832  1.3.8.3  yamt 		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
    833  1.3.8.4  yamt 		__PRINTK(("L1 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
    834  1.3.8.4  yamt 		    " -> L2[0x%x]\n", pte, (int64_t)addr,
    835  1.3.8.4  yamt 		    (int64_t)pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
    836  1.3.8.2  yamt 		/* Mark readonly */
    837  1.3.8.2  yamt 		xen_bt_set_readonly((vaddr_t) pte);
    838  1.3.8.2  yamt 	}
    839  1.3.8.2  yamt 
    840  1.3.8.2  yamt 	/* Install recursive page tables mapping */
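	/*
	 * The recursive entry maps the page table pages themselves into the
	 * virtual address space, so individual PTEs can later be reached
	 * through ordinary virtual addresses at PDIR_SLOT_PTE.
	 */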
    841  1.3.8.4  yamt #ifdef PAE
    842  1.3.8.4  yamt 	/*
     843  1.3.8.4  yamt 	 * we need a shadow page for the kernel's L2 page.
     844  1.3.8.4  yamt 	 * The real L2 kernel PD will be the last one (so that
     845  1.3.8.4  yamt 	 * pde[L2_SLOT_KERN] always points to the shadow).
    846  1.3.8.4  yamt 	 */
    847  1.3.8.4  yamt 	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
    848  1.3.8.4  yamt 	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
    849  1.3.8.4  yamt 	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;
    850  1.3.8.4  yamt 
    851  1.3.8.4  yamt 	/*
    852  1.3.8.4  yamt 	 * We don't enter a recursive entry from the L3 PD. Instead,
    853  1.3.8.4  yamt 	 * we enter the first 4 L2 pages, which includes the kernel's L2
     854  1.3.8.4  yamt 	 * shadow. But we have to enter the shadow after switching
    855  1.3.8.4  yamt 	 * %cr3, or Xen will refcount some PTE with the wrong type.
    856  1.3.8.4  yamt 	 */
    857  1.3.8.4  yamt 	addr = (u_long)pde - KERNBASE;
    858  1.3.8.4  yamt 	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
    859  1.3.8.4  yamt 		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    860  1.3.8.4  yamt 		__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
    861  1.3.8.4  yamt 		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i, (long)addr,
    862  1.3.8.4  yamt 		    (int64_t)pde[PDIR_SLOT_PTE + i]));
    863  1.3.8.4  yamt 	}
    864  1.3.8.4  yamt #if 0
    865  1.3.8.4  yamt 	addr += PAGE_SIZE; /* point to shadow L2 */
    866  1.3.8.4  yamt 	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
    867  1.3.8.4  yamt 	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
    868  1.3.8.4  yamt 	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
    869  1.3.8.4  yamt 	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
    870  1.3.8.4  yamt #endif
     871  1.3.8.4  yamt 	/* Mark tables RO, and pin the kernel's shadow as L2 */
    872  1.3.8.4  yamt 	addr = (u_long)pde - KERNBASE;
    873  1.3.8.4  yamt 	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
    874  1.3.8.4  yamt 		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
    875  1.3.8.4  yamt 		if (i == 2 || i == 3)
    876  1.3.8.4  yamt 			continue;
    877  1.3.8.4  yamt #if 0
    878  1.3.8.4  yamt 		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
    879  1.3.8.4  yamt 		xpq_queue_pin_table(xpmap_ptom_masked(addr));
    880  1.3.8.4  yamt #endif
    881  1.3.8.4  yamt 	}
    882  1.3.8.4  yamt 	if (final) {
    883  1.3.8.4  yamt 		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
    884  1.3.8.4  yamt 		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
    885  1.3.8.4  yamt 		xpq_queue_pin_table(xpmap_ptom_masked(addr));
    886  1.3.8.4  yamt 	}
    887  1.3.8.4  yamt #if 0
    888  1.3.8.4  yamt 	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
    889  1.3.8.4  yamt 	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
    890  1.3.8.4  yamt 	xpq_queue_pin_table(xpmap_ptom_masked(addr));
    891  1.3.8.4  yamt #endif
    892  1.3.8.4  yamt #else /* PAE */
    893  1.3.8.4  yamt 	/* recursive entry in higher-level PD */
    894  1.3.8.2  yamt 	bt_pgd[PDIR_SLOT_PTE] =
    895  1.3.8.3  yamt 	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
    896  1.3.8.4  yamt 	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
    897  1.3.8.4  yamt 	    " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
    898  1.3.8.4  yamt 	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
    899  1.3.8.2  yamt 	/* Mark tables RO */
    900  1.3.8.2  yamt 	xen_bt_set_readonly((vaddr_t) pde);
    901  1.3.8.4  yamt #endif
    902  1.3.8.4  yamt #if PTP_LEVELS > 2 || defined(PAE)
    903  1.3.8.2  yamt 	xen_bt_set_readonly((vaddr_t) pdtpe);
    904  1.3.8.3  yamt #endif
    905  1.3.8.3  yamt #if PTP_LEVELS > 3
    906  1.3.8.2  yamt 	xen_bt_set_readonly(new_pgd);
    907  1.3.8.3  yamt #endif
    908  1.3.8.2  yamt 	/* Pin the PGD */
     909  1.3.8.2  yamt 	__PRINTK(("pin PGD\n"));
    910  1.3.8.4  yamt #ifdef PAE
    911  1.3.8.4  yamt 	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
    912  1.3.8.4  yamt #else
    913  1.3.8.2  yamt 	xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
    914  1.3.8.4  yamt #endif
    915  1.3.8.3  yamt #ifdef __i386__
    916  1.3.8.3  yamt 	/* Save phys. addr of PDP, for libkvm. */
    917  1.3.8.4  yamt 	PDPpaddr = (long)pde;
    918  1.3.8.4  yamt #ifdef PAE
    919  1.3.8.4  yamt 	/* also save the address of the L3 page */
    920  1.3.8.4  yamt 	pmap_l3pd = pdtpe;
    921  1.3.8.4  yamt 	pmap_l3paddr = (new_pgd - KERNBASE);
    922  1.3.8.4  yamt #endif /* PAE */
    923  1.3.8.4  yamt #endif /* i386 */
    924  1.3.8.2  yamt 	/* Switch to new tables */
     925  1.3.8.2  yamt 	__PRINTK(("switch to PGD\n"));
    926  1.3.8.2  yamt 	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
    927  1.3.8.4  yamt 	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%" PRIx64 "\n",
    928  1.3.8.4  yamt 	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
    929  1.3.8.4  yamt #ifdef PAE
    930  1.3.8.4  yamt 	if (final) {
    931  1.3.8.4  yamt 		/* now enter kernel's PTE mappings */
    932  1.3.8.4  yamt 		addr =  (u_long)pde - KERNBASE + PAGE_SIZE * 3;
    933  1.3.8.4  yamt 		xpq_queue_pte_update(
    934  1.3.8.4  yamt 		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
    935  1.3.8.4  yamt 		    xpmap_ptom_masked(addr) | PG_k | PG_V);
    936  1.3.8.4  yamt 		xpq_flush_queue();
    937  1.3.8.4  yamt 	}
    938  1.3.8.4  yamt #endif
    939  1.3.8.4  yamt 
    940  1.3.8.4  yamt 
    941  1.3.8.2  yamt 
    942  1.3.8.2  yamt 	/* Now we can safely reclaim space taken by old tables */
    943  1.3.8.2  yamt 
     944  1.3.8.2  yamt 	__PRINTK(("unpin old PGD\n"));
    945  1.3.8.2  yamt 	/* Unpin old PGD */
    946  1.3.8.2  yamt 	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
    947  1.3.8.2  yamt 	/* Mark old tables RW */
    948  1.3.8.2  yamt 	page = old_pgd;
    949  1.3.8.2  yamt 	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
    950  1.3.8.2  yamt 	addr = xpmap_mtop(addr);
    951  1.3.8.4  yamt 	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
    952  1.3.8.2  yamt 	pte += pl1_pi(page);
    953  1.3.8.4  yamt 	__PRINTK(("*pde 0x%" PRIx64 " addr 0x%" PRIx64 " pte 0x%lx\n",
    954  1.3.8.4  yamt 	    (int64_t)pde[pl2_pi(page)], (int64_t)addr, (long)pte));
    955  1.3.8.2  yamt 	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
    956  1.3.8.4  yamt 		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
    957  1.3.8.4  yamt 		XENPRINTK(("addr 0x%" PRIx64 " pte 0x%lx *pte 0x%" PRIx64 "\n",
    958  1.3.8.4  yamt 		   (int64_t)addr, (long)pte, (int64_t)*pte));
    959  1.3.8.4  yamt 		xpq_queue_pte_update(addr, *pte | PG_RW);
    960  1.3.8.2  yamt 		page += PAGE_SIZE;
    961  1.3.8.2  yamt 		/*
    962  1.3.8.2  yamt 		 * Our ptes are contiguous
    963  1.3.8.2  yamt 		 * so it's safe to just "++" here
    964  1.3.8.2  yamt 		 */
    965  1.3.8.2  yamt 		pte++;
    966  1.3.8.2  yamt 	}
    967  1.3.8.2  yamt 	xpq_flush_queue();
    968  1.3.8.2  yamt }
    969  1.3.8.2  yamt 
    970  1.3.8.2  yamt 
    971  1.3.8.2  yamt /*
    972  1.3.8.2  yamt  * Bootstrap helper functions
    973  1.3.8.2  yamt  */
    974  1.3.8.2  yamt 
    975  1.3.8.2  yamt /*
    976  1.3.8.2  yamt  * Mark a page readonly
    977  1.3.8.2  yamt  * XXX: assuming vaddr = paddr + KERNBASE
    978  1.3.8.2  yamt  */
    979  1.3.8.2  yamt 
    980  1.3.8.2  yamt static void
    981  1.3.8.2  yamt xen_bt_set_readonly (vaddr_t page)
    982  1.3.8.2  yamt {
    983  1.3.8.2  yamt 	pt_entry_t entry;
    984  1.3.8.2  yamt 
    985  1.3.8.2  yamt 	entry = xpmap_ptom_masked(page - KERNBASE);
    986  1.3.8.3  yamt 	entry |= PG_k | PG_V;
    987  1.3.8.2  yamt 
    988  1.3.8.2  yamt 	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
    989  1.3.8.2  yamt }
    990  1.3.8.3  yamt 
    991  1.3.8.3  yamt #ifdef __x86_64__
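/*
 * On amd64 a Xen PV guest keeps separate kernel and user page directory
 * base pointers.  Install 'page' (a pseudo-physical address) as the user
 * one, translating it to a machine frame via the phys-to-machine map.
 */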
    992  1.3.8.3  yamt void
    993  1.3.8.3  yamt xen_set_user_pgd(paddr_t page)
    994  1.3.8.3  yamt {
    995  1.3.8.3  yamt 	struct mmuext_op op;
    996  1.3.8.3  yamt 	int s = splvm();
    997  1.3.8.3  yamt 
    998  1.3.8.3  yamt 	xpq_flush_queue();
    999  1.3.8.3  yamt 	op.cmd = MMUEXT_NEW_USER_BASEPTR;
   1000  1.3.8.3  yamt 	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
    1001  1.3.8.3  yamt 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
   1002  1.3.8.3  yamt 		panic("xen_set_user_pgd: failed to install new user page"
   1003  1.3.8.3  yamt 			" directory %lx", page);
   1004  1.3.8.3  yamt 	splx(s);
   1005  1.3.8.3  yamt }
   1006  1.3.8.3  yamt #endif /* __x86_64__ */