/*	$NetBSD: nvmm_x86_vmxfunc.S,v 1.6 2020/09/05 07:22:26 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Override user-land alignment before including asm.h */
#define	ALIGN_DATA	.align	8
#define	ALIGN_TEXT	.align	16,0x90
#define	_ALIGN_TEXT	ALIGN_TEXT

#define	_LOCORE
#include "assym.h"
#include <machine/asm.h>
#include <machine/segments.h>
#include <x86/specialreg.h>

#define	ASM_NVMM
#include <dev/nvmm/x86/nvmm_x86.h>
	.text

/*
 * %rdi = VA of a 64-bit variable holding the physical address of the
 * VMXON region.
 *
 * VMX instructions report failure through RFLAGS: ZF=1 means
 * VMfailValid, CF=1 means VMfailInvalid. Both count as failure here.
 */
ENTRY(_vmx_vmxon)
	vmxon	(%rdi)
	jz	.Lfail_vmxon
	jc	.Lfail_vmxon
	xorq	%rax,%rax
	retq
.Lfail_vmxon:
	movq	$-1,%rax
	retq
END(_vmx_vmxon)
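
/*
 * Illustrative C-side usage, a sketch only (the prototype and variable
 * name below are assumptions, not taken from this file):
 *
 *	int _vmx_vmxon(paddr_t *pa);
 *
 *	if (_vmx_vmxon(&vmxon_region_pa) != 0)
 *		panic("VMXON failed");
 */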

/*
 * No arguments. Exits VMX operation on the current CPU; the same ZF/CF
 * failure convention as above applies.
 */
ENTRY(_vmx_vmxoff)
	vmxoff
	jz	.Lfail_vmxoff
	jc	.Lfail_vmxoff
	xorq	%rax,%rax
	retq
.Lfail_vmxoff:
	movq	$-1,%rax
	retq
END(_vmx_vmxoff)

/*
 * Redefinition of the HOST_RSP field encoding from the Intel SDM, so
 * that this file does not depend on the C-side VMCS definitions.
 */
#define	VMCS_HOST_RSP				0x00006C14

#define HOST_SAVE_GPRS		\
	pushq	%rbx		;\
	pushq	%rbp		;\
	pushq	%r12		;\
	pushq	%r13		;\
	pushq	%r14		;\
	pushq	%r15

#define HOST_RESTORE_GPRS	\
	popq	%r15		;\
	popq	%r14		;\
	popq	%r13		;\
	popq	%r12		;\
	popq	%rbp		;\
	popq	%rbx
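
/*
 * These are exactly the callee-saved GPRs of the SysV AMD64 ABI
 * (%rbx, %rbp, %r12-%r15). The caller-saved registers may be
 * clobbered across the call, so only these must survive a VM entry.
 */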

#define HOST_SAVE_RAX		\
	pushq	%rax

#define HOST_RESTORE_RAX	\
	popq	%rax

#define HOST_SAVE_LDT		\
	sldtw	%ax		;\
	pushq	%rax

#define HOST_RESTORE_LDT	\
	popq	%rax		;\
	lldtw	%ax
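
/*
 * The VMCS host-state area has no LDTR field: on every VM-exit the CPU
 * loads LDTR with a null selector, so the host LDT selector must be
 * saved and reloaded manually around the VM entry.
 */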

/*
 * GUEST_SAVE_GPRS does not save RAX: vmx_resume_rip needs RAX as
 * scratch and saves the guest RAX manually. GUEST_RESTORE_GPRS does
 * restore RAX, last, so that the state pointer may be passed in %rax.
 */

#define GUEST_SAVE_GPRS(reg)				\
	movq	%rcx,(NVMM_X64_GPR_RCX * 8)(reg)	;\
	movq	%rdx,(NVMM_X64_GPR_RDX * 8)(reg)	;\
	movq	%rbx,(NVMM_X64_GPR_RBX * 8)(reg)	;\
	movq	%rbp,(NVMM_X64_GPR_RBP * 8)(reg)	;\
	movq	%rsi,(NVMM_X64_GPR_RSI * 8)(reg)	;\
	movq	%rdi,(NVMM_X64_GPR_RDI * 8)(reg)	;\
	movq	%r8,(NVMM_X64_GPR_R8 * 8)(reg)		;\
	movq	%r9,(NVMM_X64_GPR_R9 * 8)(reg)		;\
	movq	%r10,(NVMM_X64_GPR_R10 * 8)(reg)	;\
	movq	%r11,(NVMM_X64_GPR_R11 * 8)(reg)	;\
	movq	%r12,(NVMM_X64_GPR_R12 * 8)(reg)	;\
	movq	%r13,(NVMM_X64_GPR_R13 * 8)(reg)	;\
	movq	%r14,(NVMM_X64_GPR_R14 * 8)(reg)	;\
	movq	%r15,(NVMM_X64_GPR_R15 * 8)(reg)
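
/*
 * Note that RSP and RIP are absent above: the guest %rsp and %rip live
 * in the VMCS guest-state area and are saved/loaded by the hardware
 * itself on VM-exit/VM-entry.
 */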

#define GUEST_RESTORE_GPRS(reg)				\
	movq	(NVMM_X64_GPR_RCX * 8)(reg),%rcx	;\
	movq	(NVMM_X64_GPR_RDX * 8)(reg),%rdx	;\
	movq	(NVMM_X64_GPR_RBX * 8)(reg),%rbx	;\
	movq	(NVMM_X64_GPR_RBP * 8)(reg),%rbp	;\
	movq	(NVMM_X64_GPR_RSI * 8)(reg),%rsi	;\
	movq	(NVMM_X64_GPR_RDI * 8)(reg),%rdi	;\
	movq	(NVMM_X64_GPR_R8 * 8)(reg),%r8		;\
	movq	(NVMM_X64_GPR_R9 * 8)(reg),%r9		;\
	movq	(NVMM_X64_GPR_R10 * 8)(reg),%r10	;\
	movq	(NVMM_X64_GPR_R11 * 8)(reg),%r11	;\
	movq	(NVMM_X64_GPR_R12 * 8)(reg),%r12	;\
	movq	(NVMM_X64_GPR_R13 * 8)(reg),%r13	;\
	movq	(NVMM_X64_GPR_R14 * 8)(reg),%r14	;\
	movq	(NVMM_X64_GPR_R15 * 8)(reg),%r15	;\
	movq	(NVMM_X64_GPR_RAX * 8)(reg),%rax
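
/*
 * The NVMM_X64_GPR_* indices come from <dev/nvmm/x86/nvmm_x86.h>,
 * included above with ASM_NVMM defined; each guest GPR occupies one
 * 8-byte slot in the state area, hence the '* 8' scaling.
 */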

/*
 * %rdi = VA of the guest GPR state
 */
ENTRY(vmx_vmlaunch)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest GPR-state VA on the stack, via RAX. */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP into the VMCS, for the CPU to reload on VM-exit. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. If this returns, the VM entry failed. */
	vmlaunch

	/* Failure: pop the stash, restore host state, return -1. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmlaunch)
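
/*
 * Stack layout at VM entry, i.e. what vmx_resume_rip finds once the
 * CPU has reloaded RSP from VMCS_HOST_RSP on VM-exit:
 *
 *	 (%rsp)		VA of the guest GPR state
 *	 8(%rsp)	saved host LDT selector
 *	16(%rsp)	saved %r15, then %r14 ... %rbx up to 56(%rsp)
 *	64(%rsp)	return address into the C caller
 */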

/*
 * %rdi = VA of the guest GPR state
 */
ENTRY(vmx_vmresume)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest GPR-state VA on the stack, via RAX. */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP into the VMCS, for the CPU to reload on VM-exit. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. If this returns, the VM entry failed. */
	vmresume

	/* Failure: pop the stash, restore host state, return -1. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmresume)
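
/*
 * Illustrative C-side usage, a sketch only (names and fields are
 * assumptions, not taken from this file): vmlaunch is used for the
 * first entry on a given VMCS and vmresume afterwards, and -1 means
 * the VM entry itself failed:
 *
 *	if (!vcpu->launched)
 *		ret = vmx_vmlaunch(vcpu->gprs);
 *	else
 *		ret = vmx_vmresume(vcpu->gprs);
 *	if (ret == -1)
 *		... handle the failed VM entry ...
 */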

/*
 * The CPU jumps here on a VM-exit: the VMCS host RIP field is expected
 * to point at vmx_resume_rip, and the host RSP was written into the
 * VMCS by vmx_vmlaunch/vmx_vmresume above.
 */
ENTRY(vmx_resume_rip)
	/* Save the Guest GPRs. RAX is done manually. */
	pushq	%rax			/* guest RAX */
	movq	8(%rsp),%rax		/* VA of the guest GPR state */
	GUEST_SAVE_GPRS(%rax)
	popq	%rbx
	movq	%rbx,(NVMM_X64_GPR_RAX * 8)(%rax)
	addq	$8,%rsp			/* drop the stashed state VA */

	/* Restore the Host LDT. */
	HOST_RESTORE_LDT

	/* Restore the Host GPRs. */
	HOST_RESTORE_GPRS

	xorq	%rax,%rax
	retq
END(vmx_resume_rip)
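
/*
 * How the CPU learns this address, as a hedged sketch (the helper name
 * and macro are assumptions, not taken from this file): during VMCS
 * setup the C code presumably performs something like
 *
 *	vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)vmx_resume_rip);
 *
 * where VMCS_HOST_RIP is the HOST_RIP field encoding, 0x00006C16 in
 * the Intel SDM.
 */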

/*
 * Panic stubs for the two VMX instruction failure modes, VMfailValid
 * and VMfailInvalid.
 */
ENTRY(vmx_insn_failvalid)
	movq	$.Lvmx_validstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failvalid)

ENTRY(vmx_insn_failinvalid)
	movq	$.Lvmx_invalidstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failinvalid)
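
/*
 * Assumed C-side use, a sketch only (not part of this file): inline-asm
 * VMX wrappers can branch straight to these stubs on failure, e.g.:
 *
 *	asm volatile (
 *		"vmwrite	%[value],%[field];"
 *		"jz		vmx_insn_failvalid;"
 *		"jc		vmx_insn_failinvalid;"
 *		:: [field] "r" (field), [value] "r" (value)
 *		: "memory", "cc"
 *	);
 */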

	.section ".rodata"

/* .string already appends the terminating NUL. */
.Lvmx_validstr:
	.string	"VMX fail valid"
.Lvmx_invalidstr:
	.string	"VMX fail invalid"
    249