nvmm_x86_vmxfunc.S revision 1.2 1 1.2 maxv /* $NetBSD: nvmm_x86_vmxfunc.S,v 1.2 2019/04/24 18:45:15 maxv Exp $ */
2 1.1 maxv
3 1.1 maxv /*
4 1.1 maxv * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 1.1 maxv * All rights reserved.
6 1.1 maxv *
7 1.1 maxv * This code is derived from software contributed to The NetBSD Foundation
8 1.1 maxv * by Maxime Villard.
9 1.1 maxv *
10 1.1 maxv * Redistribution and use in source and binary forms, with or without
11 1.1 maxv * modification, are permitted provided that the following conditions
12 1.1 maxv * are met:
13 1.1 maxv * 1. Redistributions of source code must retain the above copyright
14 1.1 maxv * notice, this list of conditions and the following disclaimer.
15 1.1 maxv * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 maxv * notice, this list of conditions and the following disclaimer in the
17 1.1 maxv * documentation and/or other materials provided with the distribution.
18 1.1 maxv *
19 1.1 maxv * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 maxv * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 maxv * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 maxv * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 maxv * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 maxv * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 maxv * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 maxv * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 maxv * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 maxv * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 maxv * POSSIBILITY OF SUCH DAMAGE.
30 1.1 maxv */
31 1.1 maxv
32 1.1 maxv /* Override user-land alignment before including asm.h */
33 1.1 maxv #define ALIGN_DATA .align 8
34 1.1 maxv #define ALIGN_TEXT .align 16,0x90
35 1.1 maxv #define _ALIGN_TEXT ALIGN_TEXT
36 1.1 maxv
37 1.1 maxv #define _LOCORE
38 1.1 maxv #include "assym.h"
39 1.1 maxv #include <machine/asm.h>
40 1.1 maxv #include <machine/segments.h>
41 1.1 maxv #include <x86/specialreg.h>
42 1.1 maxv
43 1.1 maxv #define ASM_NVMM
44 1.1 maxv #include <dev/nvmm/x86/nvmm_x86.h>
45 1.1 maxv
46 1.1 maxv .text
47 1.1 maxv
48 1.1 maxv /*
49 1.1 maxv * %rdi = *pa
50 1.1 maxv */
51 1.1 maxv ENTRY(_vmx_vmxon)
52 1.1 maxv vmxon (%rdi)
53 1.1 maxv jz .Lfail_vmxon
54 1.1 maxv jc .Lfail_vmxon
55 1.1 maxv xorq %rax,%rax
56 1.1 maxv retq
57 1.1 maxv .Lfail_vmxon:
58 1.1 maxv movq $-1,%rax
59 1.1 maxv retq
60 1.1 maxv END(_vmx_vmxon)
61 1.1 maxv
62 1.1 maxv /*
63 1.1 maxv * no arg
64 1.1 maxv */
65 1.1 maxv ENTRY(_vmx_vmxoff)
66 1.1 maxv vmxoff
67 1.1 maxv jz .Lfail_vmxoff
68 1.1 maxv jc .Lfail_vmxoff
69 1.1 maxv xorq %rax,%rax
70 1.1 maxv retq
71 1.1 maxv .Lfail_vmxoff:
72 1.1 maxv movq $-1,%rax
73 1.1 maxv retq
74 1.1 maxv END(_vmx_vmxoff)
75 1.1 maxv
76 1.1 maxv /*
77 1.1 maxv * %rdi = op
78 1.1 maxv * %rsi = *descriptor
79 1.1 maxv */
80 1.1 maxv ENTRY(_vmx_invept)
81 1.1 maxv invept (%rsi),%rdi
82 1.1 maxv jz .Linvept_failvalid
83 1.1 maxv jc .Linvept_failinvalid
84 1.1 maxv xorq %rax,%rax
85 1.1 maxv retq
86 1.1 maxv .Linvept_failvalid:
87 1.1 maxv movq $-1,%rax
88 1.1 maxv retq
89 1.1 maxv .Linvept_failinvalid:
90 1.1 maxv movq $-2,%rax
91 1.1 maxv retq
92 1.1 maxv END(_vmx_invept)
93 1.1 maxv
94 1.1 maxv /*
95 1.1 maxv * %rdi = op
96 1.1 maxv * %rsi = *descriptor
97 1.1 maxv */
98 1.1 maxv ENTRY(_vmx_invvpid)
99 1.1 maxv invvpid (%rsi),%rdi
100 1.1 maxv jz .Linvvpid_failvalid
101 1.1 maxv jc .Linvvpid_failinvalid
102 1.1 maxv xorq %rax,%rax
103 1.1 maxv retq
104 1.1 maxv .Linvvpid_failvalid:
105 1.1 maxv movq $-1,%rax
106 1.1 maxv retq
107 1.1 maxv .Linvvpid_failinvalid:
108 1.1 maxv movq $-2,%rax
109 1.1 maxv retq
110 1.1 maxv END(_vmx_invvpid)
111 1.1 maxv
112 1.1 maxv /*
113 1.1 maxv * %rdi = op
114 1.1 maxv * %rsi = *val
115 1.1 maxv */
116 1.1 maxv ENTRY(_vmx_vmread)
117 1.1 maxv vmread %rdi,(%rsi)
118 1.1 maxv jz .Lvmread_failvalid
119 1.1 maxv jc .Lvmread_failinvalid
120 1.1 maxv xorq %rax,%rax
121 1.1 maxv retq
122 1.1 maxv .Lvmread_failvalid:
123 1.1 maxv movq $-1,%rax
124 1.1 maxv retq
125 1.1 maxv .Lvmread_failinvalid:
126 1.1 maxv movq $-2,%rax
127 1.1 maxv retq
128 1.1 maxv END(_vmx_vmread)
129 1.1 maxv
130 1.1 maxv /*
131 1.1 maxv * %rdi = op
132 1.1 maxv * %rsi = val
133 1.1 maxv */
134 1.1 maxv ENTRY(_vmx_vmwrite)
135 1.1 maxv vmwrite %rsi,%rdi
136 1.1 maxv jz .Lvmwrite_failvalid
137 1.1 maxv jc .Lvmwrite_failinvalid
138 1.1 maxv xorq %rax,%rax
139 1.1 maxv retq
140 1.1 maxv .Lvmwrite_failvalid:
141 1.1 maxv movq $-1,%rax
142 1.1 maxv retq
143 1.1 maxv .Lvmwrite_failinvalid:
144 1.1 maxv movq $-2,%rax
145 1.1 maxv retq
146 1.1 maxv END(_vmx_vmwrite)
147 1.1 maxv
148 1.1 maxv /*
149 1.1 maxv * %rdi = *pa
150 1.1 maxv */
151 1.1 maxv ENTRY(_vmx_vmptrld)
152 1.1 maxv vmptrld (%rdi)
153 1.1 maxv jz .Lfail_vmptrld
154 1.1 maxv jc .Lfail_vmptrld
155 1.1 maxv xorq %rax,%rax
156 1.1 maxv retq
157 1.1 maxv .Lfail_vmptrld:
158 1.1 maxv movq $-1,%rax
159 1.1 maxv retq
160 1.1 maxv END(_vmx_vmptrld)
161 1.1 maxv
162 1.1 maxv /*
163 1.1 maxv * %rdi = *pa
164 1.1 maxv */
165 1.1 maxv ENTRY(_vmx_vmptrst)
166 1.1 maxv vmptrst (%rdi)
167 1.1 maxv jz .Lfail_vmptrst
168 1.1 maxv jc .Lfail_vmptrst
169 1.1 maxv xorq %rax,%rax
170 1.1 maxv retq
171 1.1 maxv .Lfail_vmptrst:
172 1.1 maxv movq $-1,%rax
173 1.1 maxv retq
174 1.1 maxv END(_vmx_vmptrst)
175 1.1 maxv
176 1.1 maxv /*
177 1.1 maxv * %rdi = pa
178 1.1 maxv */
179 1.1 maxv ENTRY(_vmx_vmclear)
180 1.1 maxv vmclear (%rdi)
181 1.1 maxv jz .Lfail_vmclear
182 1.1 maxv jc .Lfail_vmclear
183 1.1 maxv xorq %rax,%rax
184 1.1 maxv retq
185 1.1 maxv .Lfail_vmclear:
186 1.1 maxv movq $-1,%rax
187 1.1 maxv retq
188 1.1 maxv END(_vmx_vmclear)
189 1.1 maxv
190 1.1 maxv /* redef */
191 1.1 maxv #define VMCS_HOST_RSP 0x00006C14
192 1.1 maxv
193 1.1 maxv #define HOST_SAVE_GPRS \
194 1.1 maxv pushq %rbx ;\
195 1.1 maxv pushq %rbp ;\
196 1.1 maxv pushq %r12 ;\
197 1.1 maxv pushq %r13 ;\
198 1.1 maxv pushq %r14 ;\
199 1.1 maxv pushq %r15
200 1.1 maxv
201 1.1 maxv #define HOST_RESTORE_GPRS \
202 1.1 maxv popq %r15 ;\
203 1.1 maxv popq %r14 ;\
204 1.1 maxv popq %r13 ;\
205 1.1 maxv popq %r12 ;\
206 1.1 maxv popq %rbp ;\
207 1.1 maxv popq %rbx
208 1.1 maxv
209 1.1 maxv #define HOST_SAVE_RAX \
210 1.1 maxv pushq %rax
211 1.1 maxv
212 1.1 maxv #define HOST_RESTORE_RAX \
213 1.1 maxv popq %rax
214 1.1 maxv
215 1.1 maxv #define HOST_SAVE_LDT \
216 1.1 maxv sldtw %ax ;\
217 1.1 maxv pushw %ax
218 1.1 maxv
219 1.1 maxv #define HOST_RESTORE_LDT \
220 1.1 maxv popw %ax ;\
221 1.1 maxv lldtw %ax
222 1.1 maxv
223 1.1 maxv /*
224 1.1 maxv * We don't save RAX (done manually), but we do restore it.
225 1.1 maxv */
226 1.1 maxv
227 1.1 maxv #define GUEST_SAVE_GPRS(reg) \
228 1.1 maxv movq %rcx,(NVMM_X64_GPR_RCX * 8)(reg) ;\
229 1.1 maxv movq %rdx,(NVMM_X64_GPR_RDX * 8)(reg) ;\
230 1.2 maxv movq %rbx,(NVMM_X64_GPR_RBX * 8)(reg) ;\
231 1.2 maxv movq %rbp,(NVMM_X64_GPR_RBP * 8)(reg) ;\
232 1.2 maxv movq %rsi,(NVMM_X64_GPR_RSI * 8)(reg) ;\
233 1.2 maxv movq %rdi,(NVMM_X64_GPR_RDI * 8)(reg) ;\
234 1.1 maxv movq %r8,(NVMM_X64_GPR_R8 * 8)(reg) ;\
235 1.1 maxv movq %r9,(NVMM_X64_GPR_R9 * 8)(reg) ;\
236 1.1 maxv movq %r10,(NVMM_X64_GPR_R10 * 8)(reg) ;\
237 1.1 maxv movq %r11,(NVMM_X64_GPR_R11 * 8)(reg) ;\
238 1.1 maxv movq %r12,(NVMM_X64_GPR_R12 * 8)(reg) ;\
239 1.1 maxv movq %r13,(NVMM_X64_GPR_R13 * 8)(reg) ;\
240 1.1 maxv movq %r14,(NVMM_X64_GPR_R14 * 8)(reg) ;\
241 1.2 maxv movq %r15,(NVMM_X64_GPR_R15 * 8)(reg)
242 1.1 maxv
243 1.1 maxv #define GUEST_RESTORE_GPRS(reg) \
244 1.1 maxv movq (NVMM_X64_GPR_RCX * 8)(reg),%rcx ;\
245 1.1 maxv movq (NVMM_X64_GPR_RDX * 8)(reg),%rdx ;\
246 1.2 maxv movq (NVMM_X64_GPR_RBX * 8)(reg),%rbx ;\
247 1.2 maxv movq (NVMM_X64_GPR_RBP * 8)(reg),%rbp ;\
248 1.2 maxv movq (NVMM_X64_GPR_RSI * 8)(reg),%rsi ;\
249 1.2 maxv movq (NVMM_X64_GPR_RDI * 8)(reg),%rdi ;\
250 1.1 maxv movq (NVMM_X64_GPR_R8 * 8)(reg),%r8 ;\
251 1.1 maxv movq (NVMM_X64_GPR_R9 * 8)(reg),%r9 ;\
252 1.1 maxv movq (NVMM_X64_GPR_R10 * 8)(reg),%r10 ;\
253 1.1 maxv movq (NVMM_X64_GPR_R11 * 8)(reg),%r11 ;\
254 1.1 maxv movq (NVMM_X64_GPR_R12 * 8)(reg),%r12 ;\
255 1.1 maxv movq (NVMM_X64_GPR_R13 * 8)(reg),%r13 ;\
256 1.1 maxv movq (NVMM_X64_GPR_R14 * 8)(reg),%r14 ;\
257 1.1 maxv movq (NVMM_X64_GPR_R15 * 8)(reg),%r15 ;\
258 1.1 maxv movq (NVMM_X64_GPR_RAX * 8)(reg),%rax
259 1.1 maxv
260 1.1 maxv /*
261 1.1 maxv * %rdi = VA of guest GPR state
262 1.1 maxv */
263 1.1 maxv ENTRY(vmx_vmlaunch)
264 1.1 maxv /* Save the Host GPRs. */
265 1.1 maxv HOST_SAVE_GPRS
266 1.1 maxv
267 1.1 maxv /* Disable Host interrupts. */
268 1.1 maxv cli
269 1.1 maxv
270 1.1 maxv /* Save the Host LDT. */
271 1.1 maxv HOST_SAVE_LDT
272 1.1 maxv
273 1.1 maxv /* Save the Host RAX. */
274 1.1 maxv movq %rdi,%rax
275 1.1 maxv pushq %rax
276 1.1 maxv
277 1.1 maxv /* Save the Host RSP. */
278 1.1 maxv movq $VMCS_HOST_RSP,%rdi
279 1.1 maxv movq %rsp,%rsi
280 1.1 maxv vmwrite %rsi,%rdi
281 1.1 maxv
282 1.1 maxv /* Restore the Guest GPRs. */
283 1.1 maxv GUEST_RESTORE_GPRS(%rax)
284 1.1 maxv
285 1.1 maxv /* Run the VM. */
286 1.1 maxv vmlaunch
287 1.1 maxv
288 1.1 maxv /* Failure. */
289 1.1 maxv addq $8,%rsp
290 1.1 maxv HOST_RESTORE_LDT
291 1.1 maxv sti
292 1.1 maxv HOST_RESTORE_GPRS
293 1.1 maxv movq $-1,%rax
294 1.1 maxv retq
295 1.1 maxv END(vmx_vmlaunch)
296 1.1 maxv
297 1.1 maxv /*
298 1.1 maxv * %rdi = VA of guest GPR state
299 1.1 maxv */
300 1.1 maxv ENTRY(vmx_vmresume)
301 1.1 maxv /* Save the Host GPRs. */
302 1.1 maxv HOST_SAVE_GPRS
303 1.1 maxv
304 1.1 maxv /* Disable Host interrupts. */
305 1.1 maxv cli
306 1.1 maxv
307 1.1 maxv /* Save the Host LDT. */
308 1.1 maxv HOST_SAVE_LDT
309 1.1 maxv
310 1.1 maxv /* Save the Host RAX. */
311 1.1 maxv movq %rdi,%rax
312 1.1 maxv pushq %rax
313 1.1 maxv
314 1.1 maxv /* Save the Host RSP. */
315 1.1 maxv movq $VMCS_HOST_RSP,%rdi
316 1.1 maxv movq %rsp,%rsi
317 1.1 maxv vmwrite %rsi,%rdi
318 1.1 maxv
319 1.1 maxv /* Restore the Guest GPRs. */
320 1.1 maxv GUEST_RESTORE_GPRS(%rax)
321 1.1 maxv
322 1.1 maxv /* Run the VM. */
323 1.1 maxv vmresume
324 1.1 maxv
325 1.1 maxv /* Failure. */
326 1.1 maxv addq $8,%rsp
327 1.1 maxv HOST_RESTORE_LDT
328 1.1 maxv sti
329 1.1 maxv HOST_RESTORE_GPRS
330 1.1 maxv movq $-1,%rax
331 1.1 maxv retq
332 1.1 maxv END(vmx_vmresume)
333 1.1 maxv
334 1.1 maxv /*
335 1.1 maxv * The CPU jumps here after a #VMEXIT.
336 1.1 maxv */
337 1.1 maxv ENTRY(vmx_resume_rip)
338 1.1 maxv /* Save the Guest GPRs. RAX done manually. */
339 1.1 maxv pushq %rax
340 1.1 maxv movq 8(%rsp),%rax
341 1.1 maxv GUEST_SAVE_GPRS(%rax)
342 1.1 maxv popq %rbx
343 1.1 maxv movq %rbx,(NVMM_X64_GPR_RAX * 8)(%rax)
344 1.1 maxv addq $8,%rsp
345 1.1 maxv
346 1.1 maxv /* Restore the Host LDT. */
347 1.1 maxv HOST_RESTORE_LDT
348 1.1 maxv
349 1.1 maxv /* Enable Host interrupts. */
350 1.1 maxv sti
351 1.1 maxv
352 1.1 maxv /* Restore the Host GPRs. */
353 1.1 maxv HOST_RESTORE_GPRS
354 1.1 maxv
355 1.1 maxv xorq %rax,%rax
356 1.1 maxv retq
357 1.1 maxv END(vmx_resume_rip)
358