Home | History | Annotate | Line # | Download | only in x86
x86_xpmap.c revision 1.1.2.2
      1 /*	$NetBSD: x86_xpmap.c,v 1.1.2.2 2007/10/25 23:59:24 bouyer Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2006 Manuel Bouyer.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *	This product includes software developed by Manuel Bouyer.
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  *
     31  */
     32 
     33 /*
     34  *
     35  * Copyright (c) 2004 Christian Limpach.
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. All advertising materials mentioning features or use of this software
     47  *    must display the following acknowledgement:
     48  *      This product includes software developed by Christian Limpach.
     49  * 4. The name of the author may not be used to endorse or promote products
     50  *    derived from this software without specific prior written permission.
     51  *
     52  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     53  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     54  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     55  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     56  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     57  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     61  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     62  */
     63 
     64 
     65 #include <sys/cdefs.h>
     66 __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.1.2.2 2007/10/25 23:59:24 bouyer Exp $");
     67 
     68 #include "opt_xen.h"
     69 
     70 #include <sys/param.h>
     71 #include <sys/systm.h>
     72 
     73 #include <uvm/uvm.h>
     74 
     75 #include <machine/gdt.h>
     76 #include <xen/xenfunc.h>
     77 
     78 #undef	XENDEBUG
     79 /* #define XENDEBUG_SYNC */
     80 /* #define	XENDEBUG_LOW */
     81 
     82 #ifdef XENDEBUG
     83 #define	XENPRINTF(x) printf x
     84 #define	XENPRINTK(x) printk x
     85 #define	XENPRINTK2(x) /* printk x */
     86 
     87 static char XBUF[256];
     88 #else
     89 #define	XENPRINTF(x)
     90 #define	XENPRINTK(x)
     91 #define	XENPRINTK2(x)
     92 #endif
     93 #define	PRINTF(x) printf x
     94 #define	PRINTK(x) printk x
     95 
/* Mapping of the hypervisor shared-info page (established elsewhere). */
volatile shared_info_t *HYPERVISOR_shared_info;
/* Local copy of the start_info handed over by Xen at boot. */
union start_info_union start_info_union;

void xen_failsafe_handler(void);

/*
 * XEN3 added a target-domain argument to HYPERVISOR_mmu_update();
 * hide the API difference behind a common "update self" wrapper.
 */
#ifdef XEN3
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
#else
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count))
#endif
    108 
/*
 * Failsafe callback registered with the hypervisor.  If Xen ever
 * invokes it, something went irrecoverably wrong; just panic.
 */
void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}
    115 
    116 
#ifndef __x86_64__
/*
 * Rewrite one descriptor-table slot via the hypervisor.  Under Xen the
 * descriptor tables are mapped read-only, so the update must go through
 * HYPERVISOR_update_descriptor(), which takes the slot's address as
 * found in the page tables.
 */
void
xen_update_descriptor(union descriptor *table, union descriptor *entry)
{
	pt_entry_t *pte;
	paddr_t addr;

	/* Resolve the slot's address from its PTE plus the page offset. */
	pte = kvtopte((vaddr_t)table);
	addr = (*pte & PG_FRAME) | ((vaddr_t)table & ~PG_FRAME);
	if (HYPERVISOR_update_descriptor(addr, entry->raw[0], entry->raw[1]))
		panic("HYPERVISOR_update_descriptor failed\n");
}
#endif
    130 
    131 void
    132 xen_set_ldt(vaddr_t base, uint32_t entries)
    133 {
    134 	vaddr_t va;
    135 	vaddr_t end;
    136 	pt_entry_t *ptp, *maptp;
    137 	int s;
    138 
    139 #ifdef __x86_64__
    140 	end = base + (entries << 3);
    141 #else
    142 	end = base + entries * sizeof(union descriptor);
    143 #endif
    144 
    145 	for (va = base; va < end; va += PAGE_SIZE) {
    146 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
    147 		ptp = kvtopte(va);
    148 		maptp = (pt_entry_t *)vtomach((vaddr_t)ptp);
    149 		XENPRINTF(("xen_set_ldt %p %d %p %p\n", (void *)base,
    150 			      entries, ptp, maptp));
    151 		printf("xen_set_ldt %p %d %p %p\n", (void *)base,
    152 			      entries, ptp, maptp);
    153 		PTE_CLEARBITS(ptp, maptp, PG_RW);
    154 	}
    155 	s = splvm();
    156 	PTE_UPDATES_FLUSH();
    157 
    158 	xpq_queue_set_ldt(base, entries);
    159 	xpq_flush_queue();
    160 	splx(s);
    161 }
    162 
#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

/*
 * Batch queue of pending mmu_update requests; drained to the
 * hypervisor by xpq_flush_queue() when full or when an operation
 * requires the updates to be visible.
 */
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
    170 
    171 void
    172 xpq_flush_queue()
    173 {
    174 	int i, ok;
    175 
    176 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
    177 	for (i = 0; i < xpq_idx; i++)
    178 		XENPRINTK2(("%d: %p %08x\n", i, (u_int)xpq_queue[i].ptr,
    179 		    (u_int)xpq_queue[i].val));
    180 	if (xpq_idx != 0 &&
    181 	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0)
    182 		panic("HYPERVISOR_mmu_update failed\n");
    183 	xpq_idx = 0;
    184 }
    185 
    186 static inline void
    187 xpq_increment_idx(void)
    188 {
    189 
    190 	xpq_idx++;
    191 	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
    192 		xpq_flush_queue();
    193 }
    194 
    195 void
    196 xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
    197 {
    198 	XENPRINTK2(("xpq_queue_machphys_update ma=%p pa=%p\n", (void *)ma, (void *)pa));
    199 	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
    200 	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
    201 	xpq_increment_idx();
    202 #ifdef XENDEBUG_SYNC
    203 	xpq_flush_queue();
    204 #endif
    205 }
    206 
    207 void
    208 xpq_queue_pde_update(pd_entry_t *ptr, pd_entry_t val)
    209 {
    210 
    211 	KASSERT(((paddr_t)ptr & 3) == 0);
    212 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
    213 	xpq_queue[xpq_idx].val = val;
    214 	xpq_increment_idx();
    215 #ifdef XENDEBUG_SYNC
    216 	xpq_flush_queue();
    217 #endif
    218 }
    219 
    220 void
    221 xpq_queue_pte_update(pt_entry_t *ptr, pt_entry_t val)
    222 {
    223 
    224 	KASSERT(((paddr_t)ptr & 3) == 0);
    225 	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
    226 	xpq_queue[xpq_idx].val = val;
    227 	xpq_increment_idx();
    228 #ifdef XENDEBUG_SYNC
    229 	xpq_flush_queue();
    230 #endif
    231 }
    232 
    233 #ifdef XEN3
    234 void
    235 xpq_queue_pt_switch(paddr_t pa)
    236 {
    237 	struct mmuext_op op;
    238 	xpq_flush_queue();
    239 
    240 	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
    241 	op.cmd = MMUEXT_NEW_BASEPTR;
    242 	op.arg1.mfn = pa >> PAGE_SHIFT;
    243 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    244 		panic("xpq_queue_pt_switch");
    245 }
    246 
    247 void
    248 xpq_queue_pin_table(paddr_t pa)
    249 {
    250 	struct mmuext_op op;
    251 	xpq_flush_queue();
    252 
    253 	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
    254 	op.arg1.mfn = pa >> PAGE_SHIFT;
    255 
    256 #ifdef __x86_64__
    257 	op.cmd = MMUEXT_PIN_L4_TABLE;
    258 #else
    259 	op.cmd = MMUEXT_PIN_L2_TABLE;
    260 #endif
    261 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    262 		panic("xpq_queue_pin_table");
    263 }
    264 
    265 void
    266 xpq_queue_unpin_table(paddr_t pa)
    267 {
    268 	struct mmuext_op op;
    269 	xpq_flush_queue();
    270 
    271 	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
    272 	op.arg1.mfn = pa >> PAGE_SHIFT;
    273 	op.cmd = MMUEXT_UNPIN_TABLE;
    274 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    275 		panic("xpq_queue_unpin_table");
    276 }
    277 
    278 void
    279 xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
    280 {
    281 	struct mmuext_op op;
    282 	xpq_flush_queue();
    283 
    284 	XENPRINTK2(("xpq_queue_set_ldt\n"));
    285 	KASSERT(va == (va & ~PAGE_MASK));
    286 	op.cmd = MMUEXT_SET_LDT;
    287 	op.arg1.linear_addr = va;
    288 	op.arg2.nr_ents = entries;
    289 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    290 		panic("xpq_queue_set_ldt");
    291 }
    292 
    293 void
    294 xpq_queue_tlb_flush()
    295 {
    296 	struct mmuext_op op;
    297 	xpq_flush_queue();
    298 
    299 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
    300 	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
    301 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    302 		panic("xpq_queue_tlb_flush");
    303 }
    304 
    305 void
    306 xpq_flush_cache()
    307 {
    308 	struct mmuext_op op;
    309 	int s = splvm();
    310 	xpq_flush_queue();
    311 
    312 	XENPRINTK2(("xpq_queue_flush_cache\n"));
    313 	op.cmd = MMUEXT_FLUSH_CACHE;
    314 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    315 		panic("xpq_flush_cache");
    316 	splx(s);
    317 }
    318 
    319 void
    320 xpq_queue_invlpg(vaddr_t va)
    321 {
    322 	struct mmuext_op op;
    323 	xpq_flush_queue();
    324 
    325 	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
    326 	op.cmd = MMUEXT_INVLPG_LOCAL;
    327 	op.arg1.linear_addr = (va & ~PAGE_MASK);
    328 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
    329 		panic("xpq_queue_invlpg");
    330 }
    331 
    332 int
    333 xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
    334 {
    335 	mmu_update_t op;
    336 	int ok;
    337 	xpq_flush_queue();
    338 
    339 	op.ptr = (paddr_t)ptr;
    340 	op.val = val;
    341 	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
    342 		return EFAULT;
    343 	return (0);
    344 }
    345 #else /* XEN3 */
    346 void
    347 xpq_queue_pt_switch(paddr_t pa)
    348 {
    349 
    350 	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
    351 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    352 	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
    353 	xpq_increment_idx();
    354 }
    355 
    356 void
    357 xpq_queue_pin_table(paddr_t pa)
    358 {
    359 
    360 	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
    361 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    362 	xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
    363 	xpq_increment_idx();
    364 }
    365 
    366 void
    367 xpq_queue_unpin_table(paddr_t pa)
    368 {
    369 
    370 	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
    371 	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
    372 	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
    373 	xpq_increment_idx();
    374 }
    375 
    376 void
    377 xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
    378 {
    379 
    380 	XENPRINTK2(("xpq_queue_set_ldt\n"));
    381 	KASSERT(va == (va & ~PAGE_MASK));
    382 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
    383 	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT | (entries << MMUEXT_CMD_SHIFT);
    384 	xpq_increment_idx();
    385 }
    386 
    387 void
    388 xpq_queue_tlb_flush()
    389 {
    390 
    391 	XENPRINTK2(("xpq_queue_tlb_flush\n"));
    392 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
    393 	xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
    394 	xpq_increment_idx();
    395 }
    396 
    397 void
    398 xpq_flush_cache()
    399 {
    400 	int s = splvm();
    401 
    402 	XENPRINTK2(("xpq_queue_flush_cache\n"));
    403 	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
    404 	xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
    405 	xpq_increment_idx();
    406 	xpq_flush_queue();
    407 	splx(s);
    408 }
    409 
    410 void
    411 xpq_queue_invlpg(vaddr_t va)
    412 {
    413 
    414 	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
    415 	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
    416 	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
    417 	xpq_increment_idx();
    418 }
    419 
    420 int
    421 xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
    422 {
    423 	mmu_update_t xpq_up[3];
    424 
    425 	xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
    426 	xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
    427 	xpq_up[1].ptr = (paddr_t)ptr;
    428 	xpq_up[1].val = val;
    429 	if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
    430 		return EFAULT;
    431 	return (0);
    432 }
    433 #endif /* XEN3 */
    434 
#ifdef XENDEBUG
/*
 * Debug helper: format the pending update queue into XBUF, up to
 * four entries per output line, and emit it via XENPRINTK2.
 * Uses snprintf with remaining-space accounting so a long queue
 * cannot overrun the 256-byte XBUF.
 */
void
xpq_debug_dump(void)
{
	int i;
	size_t len;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%x %08x ",
		    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		if (++i < xpq_idx) {
			len = strlen(XBUF);
			snprintf(XBUF + len, sizeof(XBUF) - len, "%x %08x ",
			    (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
		}
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif
    458