/*	$NetBSD: nvmm_x86_vmxfunc.S,v 1.1 2019/02/13 16:03:16 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Override user-land alignment before including asm.h */
#define ALIGN_DATA	.align	8
#define ALIGN_TEXT	.align	16,0x90
#define _ALIGN_TEXT	ALIGN_TEXT

#define _LOCORE
#include "assym.h"
#include <machine/asm.h>
#include <machine/segments.h>
#include <x86/specialreg.h>

#define ASM_NVMM
#include <dev/nvmm/x86/nvmm_x86.h>

	.text

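/*
 * The VMX instructions below report their status through the flags:
 * ZF=1 means VMfailValid (an error number is available in the
 * VM-instruction error field), CF=1 means VMfailInvalid, and both clear
 * means success. The wrappers translate this into a return value:
 * 0 on success, -1 on failure (and -2 where the two failure cases are
 * distinguished).
 */
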
/*
 * %rdi = *pa
 */
ENTRY(_vmx_vmxon)
	vmxon	(%rdi)
	jz	.Lfail_vmxon
	jc	.Lfail_vmxon
	xorq	%rax,%rax
	retq
.Lfail_vmxon:
	movq	$-1,%rax
	retq
END(_vmx_vmxon)

/*
 * no arg
 */
ENTRY(_vmx_vmxoff)
	vmxoff
	jz	.Lfail_vmxoff
	jc	.Lfail_vmxoff
	xorq	%rax,%rax
	retq
.Lfail_vmxoff:
	movq	$-1,%rax
	retq
END(_vmx_vmxoff)

/*
 * %rdi = invalidation type
 * %rsi = VA of the INVEPT descriptor
 */
ENTRY(_vmx_invept)
	invept	(%rsi),%rdi
	jz	.Linvept_failvalid
	jc	.Linvept_failinvalid
	xorq	%rax,%rax
	retq
.Linvept_failvalid:
	movq	$-1,%rax
	retq
.Linvept_failinvalid:
	movq	$-2,%rax
	retq
END(_vmx_invept)

/*
 * %rdi = invalidation type
 * %rsi = VA of the INVVPID descriptor
 */
ENTRY(_vmx_invvpid)
	invvpid	(%rsi),%rdi
	jz	.Linvvpid_failvalid
	jc	.Linvvpid_failinvalid
	xorq	%rax,%rax
	retq
.Linvvpid_failvalid:
	movq	$-1,%rax
	retq
.Linvvpid_failinvalid:
	movq	$-2,%rax
	retq
END(_vmx_invvpid)
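
/*
 * Per the Intel SDM, both descriptors are 128 bits wide: the INVEPT
 * descriptor holds the EPT pointer in its low quadword, and the INVVPID
 * descriptor holds the VPID in its low 16 bits and a linear address in
 * its high quadword; all other bits must be zero.
 */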

/*
 * %rdi = VMCS field encoding
 * %rsi = VA where the read value is stored
 */
ENTRY(_vmx_vmread)
	vmread	%rdi,(%rsi)
	jz	.Lvmread_failvalid
	jc	.Lvmread_failinvalid
	xorq	%rax,%rax
	retq
.Lvmread_failvalid:
	movq	$-1,%rax
	retq
.Lvmread_failinvalid:
	movq	$-2,%rax
	retq
END(_vmx_vmread)

/*
 * %rdi = VMCS field encoding
 * %rsi = value to write
 */
ENTRY(_vmx_vmwrite)
	vmwrite	%rsi,%rdi
	jz	.Lvmwrite_failvalid
	jc	.Lvmwrite_failinvalid
	xorq	%rax,%rax
	retq
.Lvmwrite_failvalid:
	movq	$-1,%rax
	retq
.Lvmwrite_failinvalid:
	movq	$-2,%rax
	retq
END(_vmx_vmwrite)

/*
 * %rdi = *pa
 */
ENTRY(_vmx_vmptrld)
	vmptrld	(%rdi)
	jz	.Lfail_vmptrld
	jc	.Lfail_vmptrld
	xorq	%rax,%rax
	retq
.Lfail_vmptrld:
	movq	$-1,%rax
	retq
END(_vmx_vmptrld)

/*
 * %rdi = *pa
 */
ENTRY(_vmx_vmptrst)
	vmptrst	(%rdi)
	jz	.Lfail_vmptrst
	jc	.Lfail_vmptrst
	xorq	%rax,%rax
	retq
.Lfail_vmptrst:
	movq	$-1,%rax
	retq
END(_vmx_vmptrst)

/*
 * %rdi = *pa
 */
ENTRY(_vmx_vmclear)
	vmclear	(%rdi)
	jz	.Lfail_vmclear
	jc	.Lfail_vmclear
	xorq	%rax,%rax
	retq
.Lfail_vmclear:
	movq	$-1,%rax
	retq
END(_vmx_vmclear)
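
/*
 * A minimal sketch of how the C side is assumed to declare the wrappers
 * above (names match this file; the parameter types are assumptions, not
 * taken from the driver):
 *
 *	int _vmx_vmxon(paddr_t *pa);		// 0 on success, -1 on failure
 *	int _vmx_vmxoff(void);
 *	int _vmx_invept(uint64_t type, void *desc);	// 0, -1 (VMfailValid),
 *	int _vmx_invvpid(uint64_t type, void *desc);	// -2 (VMfailInvalid)
 *	int _vmx_vmread(uint64_t field, uint64_t *val);
 *	int _vmx_vmwrite(uint64_t field, uint64_t val);
 *	int _vmx_vmptrld(paddr_t *pa);
 *	int _vmx_vmptrst(paddr_t *pa);
 *	int _vmx_vmclear(paddr_t *pa);
 */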

/* Redefined here: the VMCS field encoding of the Host RSP field. */
#define VMCS_HOST_RSP				0x00006C14

#define HOST_SAVE_GPRS		\
	pushq	%rbx		;\
	pushq	%rbp		;\
	pushq	%r12		;\
	pushq	%r13		;\
	pushq	%r14		;\
	pushq	%r15

#define HOST_RESTORE_GPRS	\
	popq	%r15		;\
	popq	%r14		;\
	popq	%r13		;\
	popq	%r12		;\
	popq	%rbp		;\
	popq	%rbx
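
/*
 * Only the callee-saved GPRs of the SysV AMD64 ABI (%rbx, %rbp, %r12-%r15)
 * need to be preserved here; the caller-saved registers may be clobbered
 * by vmx_vmlaunch/vmx_vmresume, like by any other function call.
 */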

#define HOST_SAVE_RAX		\
	pushq	%rax

#define HOST_RESTORE_RAX	\
	popq	%rax

#define HOST_SAVE_LDT		\
	sldtw	%ax		;\
	pushw	%ax

#define HOST_RESTORE_LDT	\
	popw	%ax		;\
	lldtw	%ax
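
/*
 * The LDT is saved and restored around VM entry because a VM exit loads
 * LDTR with a null selector, so the host's LDT selector would otherwise
 * be lost across the round trip.
 */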

/*
 * GUEST_SAVE_GPRS does not save RAX (vmx_resume_rip does that manually,
 * since RAX is needed to address the state area), but GUEST_RESTORE_GPRS
 * does restore it, last.
 */

#define GUEST_SAVE_GPRS(reg)				\
	movq	%rbx,(NVMM_X64_GPR_RBX * 8)(reg)	;\
	movq	%rcx,(NVMM_X64_GPR_RCX * 8)(reg)	;\
	movq	%rdx,(NVMM_X64_GPR_RDX * 8)(reg)	;\
	movq	%r8,(NVMM_X64_GPR_R8 * 8)(reg)		;\
	movq	%r9,(NVMM_X64_GPR_R9 * 8)(reg)		;\
	movq	%r10,(NVMM_X64_GPR_R10 * 8)(reg)	;\
	movq	%r11,(NVMM_X64_GPR_R11 * 8)(reg)	;\
	movq	%r12,(NVMM_X64_GPR_R12 * 8)(reg)	;\
	movq	%r13,(NVMM_X64_GPR_R13 * 8)(reg)	;\
	movq	%r14,(NVMM_X64_GPR_R14 * 8)(reg)	;\
	movq	%r15,(NVMM_X64_GPR_R15 * 8)(reg)	;\
	movq	%rbp,(NVMM_X64_GPR_RBP * 8)(reg)	;\
	movq	%rdi,(NVMM_X64_GPR_RDI * 8)(reg)	;\
	movq	%rsi,(NVMM_X64_GPR_RSI * 8)(reg)

#define GUEST_RESTORE_GPRS(reg)				\
	movq	(NVMM_X64_GPR_RBX * 8)(reg),%rbx	;\
	movq	(NVMM_X64_GPR_RCX * 8)(reg),%rcx	;\
	movq	(NVMM_X64_GPR_RDX * 8)(reg),%rdx	;\
	movq	(NVMM_X64_GPR_R8 * 8)(reg),%r8		;\
	movq	(NVMM_X64_GPR_R9 * 8)(reg),%r9		;\
	movq	(NVMM_X64_GPR_R10 * 8)(reg),%r10	;\
	movq	(NVMM_X64_GPR_R11 * 8)(reg),%r11	;\
	movq	(NVMM_X64_GPR_R12 * 8)(reg),%r12	;\
	movq	(NVMM_X64_GPR_R13 * 8)(reg),%r13	;\
	movq	(NVMM_X64_GPR_R14 * 8)(reg),%r14	;\
	movq	(NVMM_X64_GPR_R15 * 8)(reg),%r15	;\
	movq	(NVMM_X64_GPR_RBP * 8)(reg),%rbp	;\
	movq	(NVMM_X64_GPR_RDI * 8)(reg),%rdi	;\
	movq	(NVMM_X64_GPR_RSI * 8)(reg),%rsi	;\
	movq	(NVMM_X64_GPR_RAX * 8)(reg),%rax

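/*
 * VM entry/exit protocol, as read from the code below: the VA of the guest
 * GPR area is pushed on the host stack, and the resulting RSP is written
 * into the Host RSP field of the current VMCS. On #VMEXIT the CPU therefore
 * lands in vmx_resume_rip (presumably installed as the Host RIP by the C
 * driver) with (%rsp) pointing back at that VA. If VMLAUNCH/VMRESUME falls
 * through instead, the entry failed: the host state is restored and -1 is
 * returned.
 */
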
/*
 * %rdi = VA of guest GPR state
 */
ENTRY(vmx_vmlaunch)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Disable Host interrupts. */
	cli

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest state VA in RAX and on the stack. */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmlaunch

	/* Failure. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	sti
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmlaunch)

/*
 * %rdi = VA of guest GPR state
 *
 * Same protocol as vmx_vmlaunch, but executes VMRESUME for a VMCS that
 * has already been launched.
 */
ENTRY(vmx_vmresume)
	/* Save the Host GPRs. */
	HOST_SAVE_GPRS

	/* Disable Host interrupts. */
	cli

	/* Save the Host LDT. */
	HOST_SAVE_LDT

	/* Stash the guest state VA in RAX and on the stack. */
	movq	%rdi,%rax
	pushq	%rax

	/* Save the Host RSP. */
	movq	$VMCS_HOST_RSP,%rdi
	movq	%rsp,%rsi
	vmwrite	%rsi,%rdi

	/* Restore the Guest GPRs. */
	GUEST_RESTORE_GPRS(%rax)

	/* Run the VM. */
	vmresume

	/* Failure. */
	addq	$8,%rsp
	HOST_RESTORE_LDT
	sti
	HOST_RESTORE_GPRS
	movq	$-1,%rax
	retq
END(vmx_vmresume)

/*
 * The CPU jumps here after a #VMEXIT, with %rsp loaded from the Host RSP
 * field of the VMCS, i.e. pointing at the guest state VA pushed right
 * before VM entry. Returns 0 to the caller of vmx_vmlaunch/vmx_vmresume.
 */
ENTRY(vmx_resume_rip)
	/* Save the Guest GPRs. RAX done manually. */
	pushq	%rax
	movq	8(%rsp),%rax
	GUEST_SAVE_GPRS(%rax)
	popq	%rbx
	movq	%rbx,(NVMM_X64_GPR_RAX * 8)(%rax)
	addq	$8,%rsp

	/* Restore the Host LDT. */
	HOST_RESTORE_LDT

	/* Enable Host interrupts. */
	sti

	/* Restore the Host GPRs. */
	HOST_RESTORE_GPRS

	xorq	%rax,%rax
	retq
END(vmx_resume_rip)
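
/*
 * A minimal sketch of the expected C-side run loop, assuming the driver
 * declares vmx_vmlaunch()/vmx_vmresume() as taking the guest GPR array
 * and returning 0 after a #VMEXIT, -1 on entry failure (names and types
 * here are illustrative, not taken from the driver):
 *
 *	int ret;
 *
 *	if (!launched)
 *		ret = vmx_vmlaunch(gprs);
 *	else
 *		ret = vmx_vmresume(gprs);
 *	if (ret != 0)
 *		return EINVAL;
 *	launched = true;
 */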