/*	$NetBSD: nvmm_x86_vmxfunc.S,v 1.3.4.1 2020/08/29 17:00:28 martin Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Override user-land alignment before including asm.h */
#define	ALIGN_DATA	.align	8
#define ALIGN_TEXT	.align 16,0x90
#define _ALIGN_TEXT	ALIGN_TEXT

#define _LOCORE
#include "assym.h"
#include <machine/asm.h>
#include <machine/segments.h>
#include <x86/specialreg.h>

#define ASM_NVMM
#include <dev/nvmm/x86/nvmm_x86.h>

	.text

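/*
 * The VMX instructions report failure through RFLAGS: CF=1 means
 * VMfailInvalid, ZF=1 means VMfailValid (with an error number stored in
 * the current VMCS).  The stubs below fold both cases into a -1 return
 * value, and return 0 on success.
 */
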
/*
 * %rdi = *pa (VA of the variable holding the PA of the VMXON region)
 */
ENTRY(_vmx_vmxon)
	vmxon	(%rdi)
	jz	.Lfail_vmxon
	jc	.Lfail_vmxon
	xorq	%rax,%rax
	retq
.Lfail_vmxon:
	movq	$-1,%rax
	retq
END(_vmx_vmxon)

/*
 * no arg
 */
ENTRY(_vmx_vmxoff)
	vmxoff
	jz	.Lfail_vmxoff
	jc	.Lfail_vmxoff
	xorq	%rax,%rax
	retq
.Lfail_vmxoff:
	movq	$-1,%rax
	retq
END(_vmx_vmxoff)

/* Local redefinition of the VMCS Host-RSP field encoding. */
#define VMCS_HOST_RSP				0x00006C14

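/*
 * HOST_SAVE_GPRS/HOST_RESTORE_GPRS handle only %rbx, %rbp and %r12-%r15:
 * the callee-saved registers of the SysV AMD64 ABI.  The caller-saved
 * registers are overwritten by the guest state anyway.
 */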
#define HOST_SAVE_GPRS		\
	pushq	%rbx		;\
	pushq	%rbp		;\
	pushq	%r12		;\
	pushq	%r13		;\
	pushq	%r14		;\
	pushq	%r15

#define HOST_RESTORE_GPRS	\
	popq	%r15		;\
	popq	%r14		;\
	popq	%r13		;\
	popq	%r12		;\
	popq	%rbp		;\
	popq	%rbx

#define HOST_SAVE_RAX		\
	pushq	%rax

#define HOST_RESTORE_RAX	\
	popq	%rax

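/*
 * The VMCS host-state area has no LDT selector field, and a #VMEXIT leaves
 * LDTR null, so the host LDT is saved and reloaded manually.
 */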
#define HOST_SAVE_LDT		\
	sldtw	%ax		;\
	pushq	%rax

#define HOST_RESTORE_LDT	\
	popq	%rax		;\
	lldtw	%ax

/*
 * GUEST_SAVE_GPRS does not save RAX, since RAX holds the pointer to the
 * state area; vmx_resume_rip saves it manually.  GUEST_RESTORE_GPRS does
 * restore RAX.
 */

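/*
 * "reg" points to the guest GPR area: an array of 64-bit slots indexed by
 * the NVMM_X64_GPR_* constants from the nvmm headers included above.
 */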
#define GUEST_SAVE_GPRS(reg)				\
	movq	%rcx,(NVMM_X64_GPR_RCX * 8)(reg)	;\
	movq	%rdx,(NVMM_X64_GPR_RDX * 8)(reg)	;\
	movq	%rbx,(NVMM_X64_GPR_RBX * 8)(reg)	;\
	movq	%rbp,(NVMM_X64_GPR_RBP * 8)(reg)	;\
	movq	%rsi,(NVMM_X64_GPR_RSI * 8)(reg)	;\
	movq	%rdi,(NVMM_X64_GPR_RDI * 8)(reg)	;\
	movq	%r8,(NVMM_X64_GPR_R8 * 8)(reg)		;\
	movq	%r9,(NVMM_X64_GPR_R9 * 8)(reg)		;\
	movq	%r10,(NVMM_X64_GPR_R10 * 8)(reg)	;\
	movq	%r11,(NVMM_X64_GPR_R11 * 8)(reg)	;\
	movq	%r12,(NVMM_X64_GPR_R12 * 8)(reg)	;\
	movq	%r13,(NVMM_X64_GPR_R13 * 8)(reg)	;\
	movq	%r14,(NVMM_X64_GPR_R14 * 8)(reg)	;\
	movq	%r15,(NVMM_X64_GPR_R15 * 8)(reg)

#define GUEST_RESTORE_GPRS(reg)				\
	movq	(NVMM_X64_GPR_RCX * 8)(reg),%rcx	;\
	movq	(NVMM_X64_GPR_RDX * 8)(reg),%rdx	;\
	movq	(NVMM_X64_GPR_RBX * 8)(reg),%rbx	;\
	movq	(NVMM_X64_GPR_RBP * 8)(reg),%rbp	;\
	movq	(NVMM_X64_GPR_RSI * 8)(reg),%rsi	;\
	movq	(NVMM_X64_GPR_RDI * 8)(reg),%rdi	;\
	movq	(NVMM_X64_GPR_R8 * 8)(reg),%r8		;\
	movq	(NVMM_X64_GPR_R9 * 8)(reg),%r9		;\
	movq	(NVMM_X64_GPR_R10 * 8)(reg),%r10	;\
	movq	(NVMM_X64_GPR_R11 * 8)(reg),%r11	;\
	movq	(NVMM_X64_GPR_R12 * 8)(reg),%r12	;\
	movq	(NVMM_X64_GPR_R13 * 8)(reg),%r13	;\
	movq	(NVMM_X64_GPR_R14 * 8)(reg),%r14	;\
	movq	(NVMM_X64_GPR_R15 * 8)(reg),%r15	;\
	movq	(NVMM_X64_GPR_RAX * 8)(reg),%rax

/*
 * %rdi = VA of guest GPR state
 */
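/*
 * Returns -1 if the VMLAUNCH itself fails.  On success, control only comes
 * back to our caller through vmx_resume_rip, after the next #VMEXIT, with a
 * return value of 0.
 */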
ENTRY(vmx_vmlaunch)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Disable Host interrupts. */
	cli

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/*
	 * Keep the guest state pointer in %rax (for GUEST_RESTORE_GPRS below)
	 * and push a copy for vmx_resume_rip to find after the #VMEXIT.
	 */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmlaunch
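	/*
	 * A successful entry does not fall through: host execution resumes
	 * at vmx_resume_rip after the next #VMEXIT.
	 */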

	/* Failure: pop the saved state pointer, restore host state, return -1. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	sti
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmlaunch)

/*
 * %rdi = VA of guest GPR state
 */
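/*
 * Same as vmx_vmlaunch, but for a VMCS that has already been launched: the
 * entry is performed with VMRESUME rather than VMLAUNCH.
 */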
ENTRY(vmx_vmresume)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Disable Host interrupts. */
	cli

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/*
	 * Keep the guest state pointer in %rax (for GUEST_RESTORE_GPRS below)
	 * and push a copy for vmx_resume_rip to find after the #VMEXIT.
	 */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmresume

	/* Failure: pop the saved state pointer, restore host state, return -1. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	sti
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmresume)

/*
 * The CPU jumps here after a #VMEXIT.
 */
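/*
 * On entry %rsp is the value we wrote into VMCS_HOST_RSP, so the top of the
 * stack holds the guest state pointer pushed by vmx_vmlaunch/vmx_vmresume.
 * The guest %rax is still live here, so it is pushed first and copied into
 * the state area afterwards, through %rbx.
 */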
ENTRY(vmx_resume_rip)
	/* Save the Guest GPRs. RAX done manually. */
	pushq	%rax
	movq	8(%rsp),%rax
	GUEST_SAVE_GPRS(%rax)
	popq	%rbx
	movq	%rbx,(NVMM_X64_GPR_RAX * 8)(%rax)
	addq	$8,%rsp

	/* Restore the Host LDT. */
	HOST_RESTORE_LDT

	/* Enable Host interrupts. */
	sti

	/* Restore the Host GPRs. */
	HOST_RESTORE_GPRS

	xorq	%rax,%rax
	retq
END(vmx_resume_rip)

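/*
 * Panic handlers for a failed VMX instruction: "fail valid" means ZF was
 * set (an error number is available in the current VMCS), "fail invalid"
 * means CF was set (there is no current VMCS).
 */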
ENTRY(vmx_insn_failvalid)
	movq	$.Lvmx_validstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failvalid)

ENTRY(vmx_insn_failinvalid)
	movq	$.Lvmx_invalidstr,%rdi
	call	_C_LABEL(panic)
END(vmx_insn_failinvalid)

	.section ".rodata"

.Lvmx_validstr:
	.string	"VMX fail valid\0"
.Lvmx_invalidstr:
	.string "VMX fail invalid\0"