/*	$NetBSD: nvmm_x86_vmxfunc.S,v 1.5 2020/08/11 15:48:42 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Override user-land alignment before including asm.h */
#define	ALIGN_DATA	.align	8
#define ALIGN_TEXT	.align 16,0x90
#define _ALIGN_TEXT	ALIGN_TEXT

#define _LOCORE
#include "assym.h"
#include <machine/asm.h>
#include <machine/segments.h>
#include <x86/specialreg.h>

#define ASM_NVMM
#include <dev/nvmm/x86/nvmm_x86.h>

	.text

/*
 * %rdi = VA of the 64-bit PA of the VMXON region
 */
ENTRY(_vmx_vmxon)
	vmxon	(%rdi)
	jz	.Lfail_vmxon
	jc	.Lfail_vmxon
	xorq	%rax,%rax
	retq
.Lfail_vmxon:
	movq	$-1,%rax
	retq
END(_vmx_vmxon)

/*
 * no arg
 */
ENTRY(_vmx_vmxoff)
	vmxoff
	jz	.Lfail_vmxoff
	jc	.Lfail_vmxoff
	xorq	%rax,%rax
	retq
.Lfail_vmxoff:
	movq	$-1,%rax
	retq
END(_vmx_vmxoff)
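
/*
 * Illustrative only (not in this file): the C side is assumed to declare
 * these stubs roughly as follows, treating -1 as "the instruction raised
 * VMfail" (ZF or CF set):
 *
 *	int	_vmx_vmxon(paddr_t *pa);	enter VMX root operation
 *	int	_vmx_vmxoff(void);		leave VMX root operation
 */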

/* Redefined locally: the VMCS field encoding of HOST_RSP (Intel SDM, Appendix B). */
#define VMCS_HOST_RSP				0x00006C14

#define HOST_SAVE_GPRS		\
	pushq	%rbx		;\
	pushq	%rbp		;\
	pushq	%r12		;\
	pushq	%r13		;\
	pushq	%r14		;\
	pushq	%r15

#define HOST_RESTORE_GPRS	\
	popq	%r15		;\
	popq	%r14		;\
	popq	%r13		;\
	popq	%r12		;\
	popq	%rbp		;\
	popq	%rbx
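
/*
 * Note: these are exactly the callee-saved GPRs of the SysV AMD64 ABI.
 * The caller-saved registers may be clobbered freely across a guest run,
 * since the C caller already assumes they are lost over a function call.
 */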

#define HOST_SAVE_RAX		\
	pushq	%rax

#define HOST_RESTORE_RAX	\
	popq	%rax

#define HOST_SAVE_LDT		\
	sldtw	%ax		;\
	pushq	%rax

#define HOST_RESTORE_LDT	\
	popq	%rax		;\
	lldtw	%ax
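
/*
 * Why the LDT: a VM-exit reloads the host segment registers from the VMCS
 * host-state fields, but loads LDTR with a null selector (Intel SDM,
 * "Loading Host Segment and Descriptor-Table Registers"), so the host LDT
 * is the one piece of descriptor-table state that must be saved and
 * reloaded by hand.
 */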

/*
 * GUEST_SAVE_GPRS does not save RAX (the callers of the macro do that
 * manually), but GUEST_RESTORE_GPRS does restore it.
 */

#define GUEST_SAVE_GPRS(reg)				\
	movq	%rcx,(NVMM_X64_GPR_RCX * 8)(reg)	;\
	movq	%rdx,(NVMM_X64_GPR_RDX * 8)(reg)	;\
	movq	%rbx,(NVMM_X64_GPR_RBX * 8)(reg)	;\
	movq	%rbp,(NVMM_X64_GPR_RBP * 8)(reg)	;\
	movq	%rsi,(NVMM_X64_GPR_RSI * 8)(reg)	;\
	movq	%rdi,(NVMM_X64_GPR_RDI * 8)(reg)	;\
	movq	%r8,(NVMM_X64_GPR_R8 * 8)(reg)		;\
	movq	%r9,(NVMM_X64_GPR_R9 * 8)(reg)		;\
	movq	%r10,(NVMM_X64_GPR_R10 * 8)(reg)	;\
	movq	%r11,(NVMM_X64_GPR_R11 * 8)(reg)	;\
	movq	%r12,(NVMM_X64_GPR_R12 * 8)(reg)	;\
	movq	%r13,(NVMM_X64_GPR_R13 * 8)(reg)	;\
	movq	%r14,(NVMM_X64_GPR_R14 * 8)(reg)	;\
	movq	%r15,(NVMM_X64_GPR_R15 * 8)(reg)

#define GUEST_RESTORE_GPRS(reg)				\
	movq	(NVMM_X64_GPR_RCX * 8)(reg),%rcx	;\
	movq	(NVMM_X64_GPR_RDX * 8)(reg),%rdx	;\
	movq	(NVMM_X64_GPR_RBX * 8)(reg),%rbx	;\
	movq	(NVMM_X64_GPR_RBP * 8)(reg),%rbp	;\
	movq	(NVMM_X64_GPR_RSI * 8)(reg),%rsi	;\
	movq	(NVMM_X64_GPR_RDI * 8)(reg),%rdi	;\
	movq	(NVMM_X64_GPR_R8 * 8)(reg),%r8		;\
	movq	(NVMM_X64_GPR_R9 * 8)(reg),%r9		;\
	movq	(NVMM_X64_GPR_R10 * 8)(reg),%r10	;\
	movq	(NVMM_X64_GPR_R11 * 8)(reg),%r11	;\
	movq	(NVMM_X64_GPR_R12 * 8)(reg),%r12	;\
	movq	(NVMM_X64_GPR_R13 * 8)(reg),%r13	;\
	movq	(NVMM_X64_GPR_R14 * 8)(reg),%r14	;\
	movq	(NVMM_X64_GPR_R15 * 8)(reg),%r15	;\
	movq	(NVMM_X64_GPR_RAX * 8)(reg),%rax

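/*
 * Neither macro touches RSP or RIP: the guest's stack pointer and
 * instruction pointer live in the VMCS (GUEST_RSP/GUEST_RIP) and are
 * loaded and saved by the CPU itself on VM-entry and VM-exit.
 */
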
/*
 * %rdi = VA of guest GPR state
 */
ENTRY(vmx_vmlaunch)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest GPR-state pointer on the stack (and keep it in
	 * RAX, since RDI is clobbered below). */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmlaunch

	/* Failure. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmlaunch)
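
/*
 * Note: if vmlaunch (or vmresume below) succeeds, control does not come
 * back here; execution resumes at vmx_resume_rip, which the C side is
 * expected to have written into the VMCS HOST_RIP field. The fall-through
 * path only runs when the VM-entry itself fails.
 */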

/*
 * %rdi = VA of guest GPR state
 */
ENTRY(vmx_vmresume)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest GPR-state pointer on the stack (and keep it in
	 * RAX, since RDI is clobbered below). */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmresume

	/* Failure. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmresume)
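
/*
 * Usage note (for the C side, not this file): vmlaunch is only valid when
 * the current VMCS is in the "clear" launch state, i.e. right after
 * VMCLEAR/VMPTRLD; every later entry on the same VMCS must use vmresume.
 * The driver is assumed to track this and pick the right entry point.
 */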

/*
 * The CPU jumps here after a VM-exit: the VMCS HOST_RIP field is set to
 * point here, and HOST_RSP restores the stack set up in vmx_vmlaunch/
 * vmx_vmresume, so (%rsp) holds the guest GPR-state pointer pushed there
 * before the entry.
 */
ENTRY(vmx_resume_rip)
	/* Save the Guest GPRs. RAX done manually. */
	pushq	%rax				/* free up %rax */
	movq	8(%rsp),%rax			/* %rax = GPR-state pointer */
	GUEST_SAVE_GPRS(%rax)
	popq	%rbx				/* %rbx = guest %rax */
	movq	%rbx,(NVMM_X64_GPR_RAX * 8)(%rax)
	addq	$8,%rsp				/* drop the GPR-state pointer */

	/* Restore the Host LDT. */
	HOST_RESTORE_LDT

	/* Restore the Host GPRs. */
	HOST_RESTORE_GPRS

	xorq	%rax,%rax
	retq
END(vmx_resume_rip)
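
/*
 * After HOST_RESTORE_GPRS the stack is back to what it was on entry to
 * vmx_vmlaunch/vmx_vmresume, so the retq above returns 0 to their caller:
 * vmx_resume_rip completes the call that vmx_vmlaunch/vmx_vmresume began.
 */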

ENTRY(vmx_insn_failvalid)
	movq	$.Lvmx_validstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failvalid)

ENTRY(vmx_insn_failinvalid)
	movq	$.Lvmx_invalidstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failinvalid)
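
/*
 * Background: a VMX instruction reports "VMfailValid" by setting ZF (a
 * current VMCS exists, with an error number in its VM-instruction error
 * field) and "VMfailInvalid" by setting CF (no current VMCS). A caller
 * wanting to panic on either could use a sketch like the following
 * (illustrative, not part of this file):
 *
 *	vmwrite	%rsi,%rdi
 *	jc	90f			CF=1 -> VMfailInvalid
 *	jz	91f			ZF=1 -> VMfailValid
 *	...
 * 90:	call	_C_LABEL(vmx_insn_failinvalid)
 * 91:	call	_C_LABEL(vmx_insn_failvalid)
 */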

	.section ".rodata"

.Lvmx_validstr:
	.string	"VMX fail valid\0"
.Lvmx_invalidstr:
	.string "VMX fail invalid\0"