/*	$NetBSD: nvmm_x86_vmx.c,v 1.79 2020/09/08 17:00:07 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.79 2020/09/08 17:00:07 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <sys/bitops.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>

#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

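/*
 * Thin wrappers around the VMX instructions. On failure, the hardware sets
 * ZF (VMfailValid) or CF (VMfailInvalid); the jz/jc guards below branch to
 * the shared vmx_insn_failvalid/vmx_insn_failinvalid handlers, defined with
 * NVMM's low-level assembly entry points.
 */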
static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	asm volatile (
		"invept		%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	asm volatile (
		"invvpid	%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	asm volatile (
		"vmread		%[field],%[value];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	asm volatile (
		"vmwrite	%[value],%[field];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

#ifdef DIAGNOSTIC
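/* Used only by DIAGNOSTIC assertions, to verify which VMCS is loaded. */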
static inline paddr_t
vmx_vmptrst(void)
{
	paddr_t pa;

	asm volatile (
		"vmptrst	%[pa];"
		:
		: [pa] "m" (*(paddr_t *)&pa)
		: "memory"
	);

	return pa;
}
#endif

static inline void
vmx_vmptrld(paddr_t *pa)
{
	asm volatile (
		"vmptrld	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	asm volatile (
		"vmclear	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_cli(void)
{
	asm volatile ("cli" ::: "memory");
}

static inline void
vmx_sti(void)
{
	asm volatile ("sti" ::: "memory");
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC		0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS		0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
#define MSR_IA32_VMX_EXIT_CTLS			0x0483
#define MSR_IA32_VMX_ENTRY_CTLS			0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0			0x0486
#define MSR_IA32_VMX_CR0_FIXED1			0x0487
#define MSR_IA32_VMX_CR4_FIXED0			0x0488
#define MSR_IA32_VMX_CR4_FIXED1			0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_XO			__BIT(0)
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_2MB			__BIT(16)
#define		IA32_VMX_EPT_VPID_1GB			__BIT(17)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO	__BIT(22)
#define		IA32_VMX_EPT_VPID_SHSTK			__BIT(23)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_SSS			__BIT(7)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
#define VMCS_TSC_MULTIPLIER			0x00002032
#define VMCS_ENCLV_EXIT_BITMAP			0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
#define VMCS_GUEST_RTIT_CTL			0x00002814
#define VMCS_GUEST_PKRS				0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
#define VMCS_HOST_IA32_PKRS			0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define		EXIT_CTLS_CLEAR_RTIT_CTL	__BIT(25)
#define		EXIT_CTLS_LOAD_CET		__BIT(28)
#define		EXIT_CTLS_LOAD_PKRS		__BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define		ENTRY_CTLS_LOAD_RTIT_CTL	__BIT(18)
#define		ENTRY_CTLS_LOAD_CET		__BIT(20)
#define		ENTRY_CTLS_LOAD_PKRS		__BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH		0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define		PROC_CTLS2_PT_USES_GPA		__BIT(24)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define		PROC_CTLS2_WAIT_PAUSE_ENABLE	__BIT(26)
#define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
#define VMCS_GUEST_IA32_S_CET			0x00006828
#define VMCS_GUEST_SSP				0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE		0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16
#define VMCS_HOST_IA32_S_CET			0x00006C18
#define VMCS_HOST_SSP				0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE		0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
#define VMCS_EXITCODE_SPP			66
#define VMCS_EXITCODE_UMWAIT			67
#define VMCS_EXITCODE_TPAUSE			68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

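/*
 * The VMXON region: one page per CPU, passed to VMXON. It must begin with
 * the VMCS revision identifier reported by IA32_VMX_BASIC.
 */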
struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

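/*
 * The *_ONE sets are the VM-execution/entry/exit control bits we require to
 * be 1, the *_ZERO sets those we require to be 0. They are presumably checked
 * against the allowed-0/allowed-1 settings reported by the IA32_VMX_*
 * capability MSRs when the backend initializes.
 */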
#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define CR0_STATIC_MASK \
	(CR0_ET | CR0_NW | CR0_CD)

#define CR4_VALID \
	(CR4_VME | \
	 CR4_PVI | \
	 CR4_TSD | \
	 CR4_DE | \
	 CR4_PSE | \
	 CR4_PAE | \
	 CR4_MCE | \
	 CR4_PGE | \
	 CR4_PCE | \
	 CR4_OSFXSR | \
	 CR4_OSXMMEXCPT | \
	 CR4_UMIP | \
	 /* CR4_LA57 excluded */ \
	 /* CR4_VMXE excluded */ \
	 /* CR4_SMXE excluded */ \
	 CR4_FSGSBASE | \
	 CR4_PCIDE | \
	 CR4_OSXSAVE | \
	 CR4_SMEP | \
	 CR4_SMAP \
	 /* CR4_PKE excluded */ \
	 /* CR4_CET excluded */ \
	 /* CR4_PKS excluded */)
#define CR4_INVALID \
	(0xFFFFFFFFFFFFFFFFULL & ~CR4_VALID)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	struct cpu_info *vmcs_ci;
	bool vmcs_launched;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
	paddr_t vmcs_pa = (paddr_t)arg1;
	vmx_vmclear(&vmcs_pa);
}

static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
	uint64_t xc;
	int bound;

	KASSERT(kpreempt_disabled());

	bound = curlwp_bind();
	kpreempt_enable();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
	xc_wait(xc);

	kpreempt_disable();
	curlwp_bindx(bound);
}

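/*
 * Load the VCPU's VMCS on the current CPU, making it the active VMCS.
 * Calls can be nested (tracked by vmcs_refcnt); only the outermost call
 * performs the actual VMPTRLD, after clearing the VMCS if it was last
 * active on another CPU. Preemption remains disabled until the matching
 * vmx_vmcs_leave() or vmx_vmcs_destroy().
 */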
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct cpu_info *vmcs_ci;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		KASSERT(kpreempt_disabled());
		KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
		return;
	}

	vmcs_ci = cpudata->vmcs_ci;
	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	kpreempt_disable();

	if (vmcs_ci == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_ci != curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_ci = curcpu();
	kpreempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

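/*
 * Request a VM-exit as soon as the guest is able to receive an event: an
 * "interrupt window" (interrupts enabled, no blocking) or an "NMI window"
 * (no NMI blocking). Used when an event must be injected but the guest
 * cannot take it yet.
 */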
static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline bool
vmx_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

static inline int
vmx_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	uint64_t rflags, info, error;
	u_int evtype;
	uint8_t vector;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		if (vmx_excp_has_rf(vector)) {
			rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
			vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags | PSL_RF);
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate, rflags;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);

	rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
	vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags & ~PSL_RF);

	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_VCPU_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

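/* Highest CPUID leaves advertised to the guest in each range. */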
#define VMX_CPUID_MAX_BASIC		0x16
#define VMX_CPUID_MAX_HYPERVISOR	0x40000000
#define VMX_CPUID_MAX_EXTENDED		0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}

static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	unsigned int ncpus;
	uint64_t cr4;

	if (eax < 0x40000000) {
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002:
		break;
	case 0x00000003:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		break; /* TODO? */
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = atomic_load_relaxed(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
			     CPUID_PES1_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;

	default:
		break;
	}
}

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

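/* Decoding of the exit qualification for control-register accesses. */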
#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr0, realcr0, fakecr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		fakecr0 = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		fakecr0 = cpudata->gprs[gpr];
	}

	/*
	 * fakecr0 is the value the guest believes is in %cr0. realcr0 is the
	 * actual value in %cr0.
	 *
	 * In fakecr0 we must force CR0_ET to 1.
	 *
	 * In realcr0 we must force CR0_NW and CR0_CD to 0, and CR0_ET and
	 * CR0_NE to 1.
	 */
	fakecr0 |= CR0_ET;
	realcr0 = (fakecr0 & ~CR0_STATIC_MASK) | CR0_ET | CR0_NE;

	if (vmx_check_cr(realcr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (realcr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	oldcr0 = (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
	    (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
	if ((oldcr0 ^ fakecr0) & CR0_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_CR0_SHADOW, fakecr0);
	vmx_vmwrite(VMCS_GUEST_CR0, realcr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr4, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	if (gpr & CR4_INVALID) {
		return -1;
	}
	cr4 = gpr | CR4_VMXE;
	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	oldcr4 = vmx_vmread(VMCS_GUEST_CR4);
	if ((oldcr4 ^ gpr) & CR4_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
		if (cpudata->tpr.exit_changed) {
			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;
	int ret;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}
}

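/*
 * Decoding of the exit qualification and instruction information for
 * I/O instruction exits.
 */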
#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

	exit->reason = NVMM_VCPU_EXIT_IO;

	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if (exit->u.io.in && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.io.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

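/* MSRs handled in-kernel: reads return 0, writes are silently dropped. */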
static const uint64_t msr_ignore_list[] = {
	MSR_BIOS_SIGN,
	MSR_IA32_PLATFORM_ID
};

static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;
	size_t i;

	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
		if (exit->u.rdmsr.msr == MSR_CR_PAT) {
			val = vmx_vmread(VMCS_GUEST_IA32_PAT);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
			val = cpudata->gmsr_misc_enable;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.rdmsr.msr == MSR_IA32_ARCH_CAPABILITIES) {
			u_int descs[4];
			if (cpuid_level < 7) {
				goto error;
			}
			x86_cpuid(7, descs);
			if (!(descs[3] & CPUID_SEF_ARCH_CAP)) {
				goto error;
			}
			val = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
			val &= (IA32_ARCH_RDCL_NO |
			    IA32_ARCH_SSB_NO |
			    IA32_ARCH_MDS_NO |
			    IA32_ARCH_TAA_NO);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
				continue;
			val = 0;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
	} else {
		if (exit->u.wrmsr.msr == MSR_TSC) {
			cpudata->gtsc = exit->u.wrmsr.val;
			cpudata->gtsc_want_update = true;
			goto handled;
		}
		if (exit->u.wrmsr.msr == MSR_CR_PAT) {
			val = exit->u.wrmsr.val;
			if (__predict_false(!nvmm_x86_pat_validate(val))) {
				goto error;
			}
			vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
			goto handled;
		}
		if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
			/* Don't care. */
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
				continue;
			goto handled;
		}
	}

	return false;

handled:
	vmx_inkernel_advance();
	return true;

error:
	vmx_inject_gp(vcpu);
	return true;
}

static void
vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t inslen, rip;

	exit->reason = NVMM_VCPU_EXIT_RDMSR;
	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.rdmsr.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rdx, rax, inslen, rip;

	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
	rax = cpudata->gprs[NVMM_X64_GPR_RAX];

	exit->reason = NVMM_VCPU_EXIT_WRMSR;
	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.wrmsr.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;
	if (vmx_xcr0_mask != 0) {
		wrxcr(0, cpudata->gxcr0);
	}

	vmx_inkernel_advance();
	return;

error:
	vmx_inject_gp(vcpu);
}

#define VMX_EPT_VIOLATION_READ	__BIT(0)
1984 #define VMX_EPT_VIOLATION_WRITE __BIT(1)
1985 #define VMX_EPT_VIOLATION_EXECUTE __BIT(2)
1986
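/*
 * Handle an EPT violation (nested page fault). The exit qualification
 * encodes the access types that faulted; NVMM reports a single
 * permission, with write taking precedence over execute.
 */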
1987 static void
1988 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1989 struct nvmm_vcpu_exit *exit)
1990 {
1991 uint64_t perm;
1992 gpaddr_t gpa;
1993
1994 gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
1995
1996 exit->reason = NVMM_VCPU_EXIT_MEMORY;
1997 perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1998 if (perm & VMX_EPT_VIOLATION_WRITE)
1999 exit->u.mem.prot = PROT_WRITE;
2000 else if (perm & VMX_EPT_VIOLATION_EXECUTE)
2001 exit->u.mem.prot = PROT_EXEC;
2002 else
2003 exit->u.mem.prot = PROT_READ;
2004 exit->u.mem.gpa = gpa;
2005 exit->u.mem.inst_len = 0;
2006
2007 vmx_vcpu_state_provide(vcpu,
2008 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
2009 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
2010 }
2011
2012 /* -------------------------------------------------------------------------- */
2013
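/*
 * Install the guest FPU state around each run. XCR0 is swapped by hand:
 * the guest's value is loaded after restoring the guest XSAVE area, and
 * the host's value is put back before saving it again on the way out.
 */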
2014 static void
2015 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
2016 {
2017 struct vmx_cpudata *cpudata = vcpu->cpudata;
2018
2019 fpu_kern_enter();
2020 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
2021
2022 if (vmx_xcr0_mask != 0) {
2023 cpudata->hxcr0 = rdxcr(0);
2024 wrxcr(0, cpudata->gxcr0);
2025 }
2026 }
2027
2028 static void
2029 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
2030 {
2031 struct vmx_cpudata *cpudata = vcpu->cpudata;
2032
2033 if (vmx_xcr0_mask != 0) {
2034 cpudata->gxcr0 = rdxcr(0);
2035 wrxcr(0, cpudata->hxcr0);
2036 }
2037
2038 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
2039 fpu_kern_leave();
2040 }
2041
2042 static void
2043 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
2044 {
2045 struct vmx_cpudata *cpudata = vcpu->cpudata;
2046
2047 x86_dbregs_save(curlwp);
2048
2049 ldr7(0);
2050
2051 ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
2052 ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
2053 ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
2054 ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
2055 ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
2056 }
2057
2058 static void
2059 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
2060 {
2061 struct vmx_cpudata *cpudata = vcpu->cpudata;
2062
2063 cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
2064 cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
2065 cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
2066 cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
2067 cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
2068
2069 x86_dbregs_restore(curlwp);
2070 }
2071
2072 static void
2073 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
2074 {
2075 struct vmx_cpudata *cpudata = vcpu->cpudata;
2076
2077 	/* These host fields are reloaded automatically by the CPU on VMEXIT. */
2078 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)curcpu()->ci_idtvec.iv_idt);
2079 vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
2080 vmx_vmwrite(VMCS_HOST_CR3, rcr3());
2081 vmx_vmwrite(VMCS_HOST_CR4, rcr4());
2082
2083 cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
2084 }
2085
2086 static void
2087 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
2088 {
2089 struct vmx_cpudata *cpudata = vcpu->cpudata;
2090
2091 wrmsr(MSR_STAR, cpudata->star);
2092 wrmsr(MSR_LSTAR, cpudata->lstar);
2093 wrmsr(MSR_CSTAR, cpudata->cstar);
2094 wrmsr(MSR_SFMASK, cpudata->sfmask);
2095 wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
2096 }
2097
2098 /* -------------------------------------------------------------------------- */
2099
2100 #define VMX_INVVPID_ADDRESS 0
2101 #define VMX_INVVPID_CONTEXT 1
2102 #define VMX_INVVPID_ALL 2
2103 #define VMX_INVVPID_CONTEXT_NOGLOBAL 3
2104
2105 #define VMX_INVEPT_CONTEXT 1
2106 #define VMX_INVEPT_ALL 2
2107
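/*
 * TLB handling. The guest TLB (VPID-tagged mappings) must be flushed
 * when the VCPU migrates to another host CPU. The host TLB (EPT
 * mappings) uses a generation number: vmx_tlb_flush() bumps
 * mach_htlb_gen whenever host-side EPT entries change, and a VCPU whose
 * own generation lags behind flushes the EPT cache and marks the other
 * CPUs as wanting a flush of their own.
 */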
2108 static inline void
2109 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2110 {
2111 struct vmx_cpudata *cpudata = vcpu->cpudata;
2112
2113 if (vcpu->hcpu_last != hcpu) {
2114 cpudata->gtlb_want_flush = true;
2115 }
2116 }
2117
2118 static inline void
2119 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2120 {
2121 struct vmx_cpudata *cpudata = vcpu->cpudata;
2122 struct ept_desc ept_desc;
2123
2124 if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
2125 return;
2126 }
2127
2128 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2129 ept_desc.mbz = 0;
2130 vmx_invept(vmx_ept_flush_op, &ept_desc);
2131 kcpuset_clear(cpudata->htlb_want_flush, hcpu);
2132 }
2133
2134 static inline uint64_t
2135 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
2136 {
2137 struct ept_desc ept_desc;
2138 uint64_t machgen;
2139
2140 machgen = machdata->mach_htlb_gen;
2141 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
2142 return machgen;
2143 }
2144
2145 kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
2146
2147 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2148 ept_desc.mbz = 0;
2149 vmx_invept(vmx_ept_flush_op, &ept_desc);
2150
2151 return machgen;
2152 }
2153
2154 static inline void
2155 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
2156 {
2157 cpudata->vcpu_htlb_gen = machgen;
2158 kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
2159 }
2160
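/*
 * If the exit interrupted the delivery of an event, re-inject it on the
 * next entry via the IDT-vectoring fields. For software interrupts and
 * exceptions the instruction length must be supplied as well, so that
 * the CPU can redo the delivery correctly.
 */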
2161 static inline void
2162 vmx_exit_evt(struct vmx_cpudata *cpudata)
2163 {
2164 uint64_t info, err, inslen;
2165
2166 cpudata->evt_pending = false;
2167
2168 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
2169 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
2170 return;
2171 }
2172 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
2173
2174 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
2175 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
2176
2177 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
2178 case INTR_TYPE_SW_INT:
2179 case INTR_TYPE_PRIV_SW_EXC:
2180 case INTR_TYPE_SW_EXC:
2181 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
2182 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
2183 }
2184
2185 cpudata->evt_pending = true;
2186 }
2187
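/*
 * Run the VCPU in a loop. VMLAUNCH is used the first time the VMCS is
 * entered, VMRESUME afterwards. The loop only returns to userland when
 * the exit cannot be handled in the kernel, or when userland explicitly
 * needs to regain control.
 */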
2188 static int
2189 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2190 struct nvmm_vcpu_exit *exit)
2191 {
2192 struct nvmm_comm_page *comm = vcpu->comm;
2193 struct vmx_machdata *machdata = mach->machdata;
2194 struct vmx_cpudata *cpudata = vcpu->cpudata;
2195 struct vpid_desc vpid_desc;
2196 struct cpu_info *ci;
2197 uint64_t exitcode;
2198 uint64_t intstate;
2199 uint64_t machgen;
2200 int hcpu, ret;
2201 bool launched;
2202
2203 vmx_vmcs_enter(vcpu);
2204
2205 vmx_vcpu_state_commit(vcpu);
2206 comm->state_cached = 0;
2207
2208 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2209 vmx_vmcs_leave(vcpu);
2210 return EINVAL;
2211 }
2212
2213 ci = curcpu();
2214 hcpu = cpu_number();
2215 launched = cpudata->vmcs_launched;
2216
2217 vmx_gtlb_catchup(vcpu, hcpu);
2218 vmx_htlb_catchup(vcpu, hcpu);
2219
2220 if (vcpu->hcpu_last != hcpu) {
2221 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
2222 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
2223 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
2224 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
2225 cpudata->gtsc_want_update = true;
2226 vcpu->hcpu_last = hcpu;
2227 }
2228
2229 vmx_vcpu_guest_dbregs_enter(vcpu);
2230 vmx_vcpu_guest_misc_enter(vcpu);
2231 vmx_vcpu_guest_fpu_enter(vcpu);
2232
2233 while (1) {
2234 if (cpudata->gtlb_want_flush) {
2235 vpid_desc.vpid = cpudata->asid;
2236 vpid_desc.addr = 0;
2237 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2238 cpudata->gtlb_want_flush = false;
2239 }
2240
2241 if (__predict_false(cpudata->gtsc_want_update)) {
2242 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
2243 cpudata->gtsc_want_update = false;
2244 }
2245
2246 vmx_cli();
2247 machgen = vmx_htlb_flush(machdata, cpudata);
2248 lcr2(cpudata->gcr2);
2249 if (launched) {
2250 ret = vmx_vmresume(cpudata->gprs);
2251 } else {
2252 ret = vmx_vmlaunch(cpudata->gprs);
2253 }
2254 cpudata->gcr2 = rcr2();
2255 vmx_htlb_flush_ack(cpudata, machgen);
2256 vmx_sti();
2257
2258 if (__predict_false(ret != 0)) {
2259 vmx_exit_invalid(exit, -1);
2260 break;
2261 }
2262 vmx_exit_evt(cpudata);
2263
2264 launched = true;
2265
2266 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2267 exitcode &= __BITS(15,0);
2268
2269 switch (exitcode) {
2270 case VMCS_EXITCODE_EXC_NMI:
2271 vmx_exit_exc_nmi(mach, vcpu, exit);
2272 break;
2273 case VMCS_EXITCODE_EXT_INT:
2274 exit->reason = NVMM_VCPU_EXIT_NONE;
2275 break;
2276 case VMCS_EXITCODE_CPUID:
2277 vmx_exit_cpuid(mach, vcpu, exit);
2278 break;
2279 case VMCS_EXITCODE_HLT:
2280 vmx_exit_hlt(mach, vcpu, exit);
2281 break;
2282 case VMCS_EXITCODE_CR:
2283 vmx_exit_cr(mach, vcpu, exit);
2284 break;
2285 case VMCS_EXITCODE_IO:
2286 vmx_exit_io(mach, vcpu, exit);
2287 break;
2288 case VMCS_EXITCODE_RDMSR:
2289 vmx_exit_rdmsr(mach, vcpu, exit);
2290 break;
2291 case VMCS_EXITCODE_WRMSR:
2292 vmx_exit_wrmsr(mach, vcpu, exit);
2293 break;
2294 case VMCS_EXITCODE_SHUTDOWN:
2295 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2296 break;
2297 case VMCS_EXITCODE_MONITOR:
2298 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2299 break;
2300 case VMCS_EXITCODE_MWAIT:
2301 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2302 break;
2303 case VMCS_EXITCODE_XSETBV:
2304 vmx_exit_xsetbv(mach, vcpu, exit);
2305 break;
2306 case VMCS_EXITCODE_RDPMC:
2307 case VMCS_EXITCODE_RDTSCP:
2308 case VMCS_EXITCODE_INVVPID:
2309 case VMCS_EXITCODE_INVEPT:
2310 case VMCS_EXITCODE_VMCALL:
2311 case VMCS_EXITCODE_VMCLEAR:
2312 case VMCS_EXITCODE_VMLAUNCH:
2313 case VMCS_EXITCODE_VMPTRLD:
2314 case VMCS_EXITCODE_VMPTRST:
2315 case VMCS_EXITCODE_VMREAD:
2316 case VMCS_EXITCODE_VMRESUME:
2317 case VMCS_EXITCODE_VMWRITE:
2318 case VMCS_EXITCODE_VMXOFF:
2319 case VMCS_EXITCODE_VMXON:
2320 vmx_inject_ud(vcpu);
2321 exit->reason = NVMM_VCPU_EXIT_NONE;
2322 break;
2323 case VMCS_EXITCODE_EPT_VIOLATION:
2324 vmx_exit_epf(mach, vcpu, exit);
2325 break;
2326 case VMCS_EXITCODE_INT_WINDOW:
2327 vmx_event_waitexit_disable(vcpu, false);
2328 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2329 break;
2330 case VMCS_EXITCODE_NMI_WINDOW:
2331 vmx_event_waitexit_disable(vcpu, true);
2332 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2333 break;
2334 default:
2335 vmx_exit_invalid(exit, exitcode);
2336 break;
2337 }
2338
2339 		/* Keep rolling unless userland needs attention or the exit must be forwarded. */
2340 if (nvmm_return_needed()) {
2341 break;
2342 }
2343 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2344 break;
2345 }
2346 }
2347
2348 cpudata->vmcs_launched = launched;
2349
2350 cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
2351
2352 vmx_vcpu_guest_fpu_leave(vcpu);
2353 vmx_vcpu_guest_misc_leave(vcpu);
2354 vmx_vcpu_guest_dbregs_leave(vcpu);
2355
2356 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2357 exit->exitstate.cr8 = cpudata->gcr8;
2358 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2359 exit->exitstate.int_shadow =
2360 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2361 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2362 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2363 exit->exitstate.evt_pending = cpudata->evt_pending;
2364
2365 vmx_vmcs_leave(vcpu);
2366
2367 return 0;
2368 }
2369
2370 /* -------------------------------------------------------------------------- */
2371
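/*
 * Allocate physically contiguous, wired, zeroed pages, mapped in the
 * kernel map. Used for the structures whose physical address is handed
 * to the CPU: the VMCS, the MSR bitmap, the MSR lists and the VMXON
 * regions.
 */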
2372 static int
2373 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
2374 {
2375 struct pglist pglist;
2376 paddr_t _pa;
2377 vaddr_t _va;
2378 size_t i;
2379 int ret;
2380
2381 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
2382 &pglist, 1, 0);
2383 if (ret != 0)
2384 return ENOMEM;
2385 _pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
2386 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
2387 UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
2388 if (_va == 0)
2389 goto error;
2390
2391 for (i = 0; i < npages; i++) {
2392 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
2393 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
2394 }
2395 pmap_update(pmap_kernel());
2396
2397 memset((void *)_va, 0, npages * PAGE_SIZE);
2398
2399 *pa = _pa;
2400 *va = _va;
2401 return 0;
2402
2403 error:
2404 for (i = 0; i < npages; i++) {
2405 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
2406 }
2407 return ENOMEM;
2408 }
2409
2410 static void
2411 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
2412 {
2413 size_t i;
2414
2415 pmap_kremove(va, npages * PAGE_SIZE);
2416 pmap_update(pmap_kernel());
2417 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
2418 for (i = 0; i < npages; i++) {
2419 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
2420 }
2421 }
2422
2423 /* -------------------------------------------------------------------------- */
2424
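/*
 * The MSR bitmap is a 4KB page split in four 1KB chunks: read-low
 * (MSRs 0x00000000-0x00001FFF), read-high (0xC0000000-0xC0001FFF),
 * write-low and write-high, one bit per MSR. Clearing a bit grants the
 * guest direct access. For example MSR_STAR (0xC0000081) lands at byte
 * (0x81 / 8) + 1024 = 1040, bit 1, in the read half, and at byte
 * 2048 + 1040 in the write half.
 */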
2425 static void
2426 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2427 {
2428 uint64_t byte;
2429 uint8_t bitoff;
2430
2431 if (msr < 0x00002000) {
2432 /* Range 1 */
2433 byte = ((msr - 0x00000000) / 8) + 0;
2434 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2435 /* Range 2 */
2436 byte = ((msr - 0xC0000000) / 8) + 1024;
2437 } else {
2438 panic("%s: wrong range", __func__);
2439 }
2440
2441 bitoff = (msr & 0x7);
2442
2443 if (read) {
2444 bitmap[byte] &= ~__BIT(bitoff);
2445 }
2446 if (write) {
2447 bitmap[2048 + byte] &= ~__BIT(bitoff);
2448 }
2449 }
2450
2451 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2452 #define VMX_SEG_ATTRIB_S __BIT(4)
2453 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2454 #define VMX_SEG_ATTRIB_P __BIT(7)
2455 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2456 #define VMX_SEG_ATTRIB_L __BIT(13)
2457 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2458 #define VMX_SEG_ATTRIB_G __BIT(15)
2459 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2460
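/*
 * Translate between the NVMM segment representation and the VMCS
 * access-rights format. A non-present segment is additionally marked
 * "unusable" in the VMCS, and the unusable bit clears P on readback.
 * GDT and IDT have no selector or access rights, only base and limit.
 */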
2461 static void
2462 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2463 {
2464 uint64_t attrib;
2465
2466 attrib =
2467 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2468 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2469 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2470 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2471 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2472 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2473 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2474 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2475 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2476
2477 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2478 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2479 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2480 }
2481 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2482 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2483 }
2484
2485 static void
2486 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2487 {
2488 uint64_t selector = 0, attrib = 0, base, limit;
2489
2490 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2491 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2492 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2493 }
2494 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2495 base = vmx_vmread(vmx_guest_segs[idx].base);
2496
2497 segs[idx].selector = selector;
2498 segs[idx].limit = limit;
2499 segs[idx].base = base;
2500 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2501 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2502 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2503 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2504 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2505 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2506 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2507 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2508 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2509 segs[idx].attrib.p = 0;
2510 }
2511 }
2512
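/*
 * Changing the CR0/CR4 bits covered by CR0_TLB_FLUSH/CR4_TLB_FLUSH,
 * reloading CR3, or changing the EFER bits covered by EFER_TLB_FLUSH
 * invalidates cached translations, so a guest TLB flush must be
 * scheduled when userland installs such a state.
 */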
2513 static inline bool
2514 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2515 {
2516 uint64_t cr0, cr3, cr4, efer;
2517
2518 if (flags & NVMM_X64_STATE_CRS) {
2519 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2520 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2521 return true;
2522 }
2523 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2524 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2525 return true;
2526 }
2527 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2528 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2529 return true;
2530 }
2531 }
2532
2533 if (flags & NVMM_X64_STATE_MSRS) {
2534 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2535 if ((efer ^
2536 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2537 return true;
2538 }
2539 }
2540
2541 return false;
2542 }
2543
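/*
 * Install in the VMCS the state fields that userland requested via the
 * comm page. The comm page protocol: state_commit selects what to push
 * before a run, state_wanted what to pull after an exit, and
 * state_cached tracks what is already synchronized.
 */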
2544 static void
2545 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2546 {
2547 struct nvmm_comm_page *comm = vcpu->comm;
2548 const struct nvmm_x64_state *state = &comm->state;
2549 struct vmx_cpudata *cpudata = vcpu->cpudata;
2550 struct fxsave *fpustate;
2551 uint64_t ctls1, intstate;
2552 uint64_t flags;
2553
2554 flags = comm->state_wanted;
2555
2556 vmx_vmcs_enter(vcpu);
2557
2558 if (vmx_state_tlb_flush(state, flags)) {
2559 cpudata->gtlb_want_flush = true;
2560 }
2561
2562 if (flags & NVMM_X64_STATE_SEGS) {
2563 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2564 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2565 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2566 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2567 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2568 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2569 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2570 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2571 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2572 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2573 }
2574
2575 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2576 if (flags & NVMM_X64_STATE_GPRS) {
2577 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2578
2579 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2580 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2581 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2582 }
2583
2584 if (flags & NVMM_X64_STATE_CRS) {
2585 /*
2586 * CR0_ET must be 1 both in the shadow and the real register.
2587 * CR0_NE must be 1 in the real register.
2588 * CR0_NW and CR0_CD must be 0 in the real register.
2589 */
2590 vmx_vmwrite(VMCS_CR0_SHADOW,
2591 (state->crs[NVMM_X64_CR_CR0] & CR0_STATIC_MASK) |
2592 CR0_ET);
2593 vmx_vmwrite(VMCS_GUEST_CR0,
2594 (state->crs[NVMM_X64_CR_CR0] & ~CR0_STATIC_MASK) |
2595 CR0_ET | CR0_NE);
2596
2597 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2598
2599 /* XXX We are not handling PDPTE here. */
2600 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]);
2601
2602 /* CR4_VMXE is mandatory. */
2603 vmx_vmwrite(VMCS_GUEST_CR4,
2604 (state->crs[NVMM_X64_CR_CR4] & CR4_VALID) | CR4_VMXE);
2605
2606 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2607
2608 if (vmx_xcr0_mask != 0) {
2609 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2610 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2611 cpudata->gxcr0 &= vmx_xcr0_mask;
2612 cpudata->gxcr0 |= XCR0_X87;
2613 }
2614 }
2615
2616 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2617 if (flags & NVMM_X64_STATE_DRS) {
2618 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2619
2620 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2621 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2622 }
2623
2624 if (flags & NVMM_X64_STATE_MSRS) {
2625 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2626 state->msrs[NVMM_X64_MSR_STAR];
2627 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2628 state->msrs[NVMM_X64_MSR_LSTAR];
2629 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2630 state->msrs[NVMM_X64_MSR_CSTAR];
2631 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2632 state->msrs[NVMM_X64_MSR_SFMASK];
2633 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2634 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2635
2636 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2637 state->msrs[NVMM_X64_MSR_EFER]);
2638 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2639 state->msrs[NVMM_X64_MSR_PAT]);
2640 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2641 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2642 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2643 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2644 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2645 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2646
2647 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
2648 cpudata->gtsc_want_update = true;
2649
2650 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2651 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2652 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2653 ctls1 |= ENTRY_CTLS_LONG_MODE;
2654 } else {
2655 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2656 }
2657 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2658 }
2659
2660 if (flags & NVMM_X64_STATE_INTR) {
2661 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2662 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2663 if (state->intr.int_shadow) {
2664 intstate |= INT_STATE_MOVSS;
2665 }
2666 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2667
2668 if (state->intr.int_window_exiting) {
2669 vmx_event_waitexit_enable(vcpu, false);
2670 } else {
2671 vmx_event_waitexit_disable(vcpu, false);
2672 }
2673
2674 if (state->intr.nmi_window_exiting) {
2675 vmx_event_waitexit_enable(vcpu, true);
2676 } else {
2677 vmx_event_waitexit_disable(vcpu, true);
2678 }
2679 }
2680
2681 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2682 if (flags & NVMM_X64_STATE_FPU) {
2683 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
2684 sizeof(state->fpu));
2685
2686 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
2687 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2688 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2689
2690 if (vmx_xcr0_mask != 0) {
2691 /* Reset XSTATE_BV, to force a reload. */
2692 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2693 }
2694 }
2695
2696 vmx_vmcs_leave(vcpu);
2697
2698 comm->state_wanted = 0;
2699 comm->state_cached |= flags;
2700 }
2701
2702 static void
2703 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2704 {
2705 struct nvmm_comm_page *comm = vcpu->comm;
2706 struct nvmm_x64_state *state = &comm->state;
2707 struct vmx_cpudata *cpudata = vcpu->cpudata;
2708 uint64_t intstate, flags;
2709
2710 flags = comm->state_wanted;
2711
2712 vmx_vmcs_enter(vcpu);
2713
2714 if (flags & NVMM_X64_STATE_SEGS) {
2715 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2716 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2717 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2718 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2719 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2720 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2721 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2722 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2723 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2724 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2725 }
2726
2727 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2728 if (flags & NVMM_X64_STATE_GPRS) {
2729 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2730
2731 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2732 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2733 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2734 }
2735
2736 if (flags & NVMM_X64_STATE_CRS) {
2737 state->crs[NVMM_X64_CR_CR0] =
2738 (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
2739 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
2740 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2741 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2742 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2743 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2744 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2745
2746 /* Hide VMXE. */
2747 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2748 }
2749
2750 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2751 if (flags & NVMM_X64_STATE_DRS) {
2752 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2753
2754 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2755 }
2756
2757 if (flags & NVMM_X64_STATE_MSRS) {
2758 state->msrs[NVMM_X64_MSR_STAR] =
2759 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2760 state->msrs[NVMM_X64_MSR_LSTAR] =
2761 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2762 state->msrs[NVMM_X64_MSR_CSTAR] =
2763 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2764 state->msrs[NVMM_X64_MSR_SFMASK] =
2765 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2766 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2767 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2768 state->msrs[NVMM_X64_MSR_EFER] =
2769 vmx_vmread(VMCS_GUEST_IA32_EFER);
2770 state->msrs[NVMM_X64_MSR_PAT] =
2771 vmx_vmread(VMCS_GUEST_IA32_PAT);
2772 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2773 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2774 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2775 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2776 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2777 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2778 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
2779 }
2780
2781 if (flags & NVMM_X64_STATE_INTR) {
2782 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2783 state->intr.int_shadow =
2784 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2785 state->intr.int_window_exiting = cpudata->int_window_exit;
2786 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2787 state->intr.evt_pending = cpudata->evt_pending;
2788 }
2789
2790 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2791 if (flags & NVMM_X64_STATE_FPU) {
2792 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
2793 sizeof(state->fpu));
2794 }
2795
2796 vmx_vmcs_leave(vcpu);
2797
2798 comm->state_wanted = 0;
2799 comm->state_cached |= flags;
2800 }
2801
2802 static void
2803 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2804 {
2805 vcpu->comm->state_wanted = flags;
2806 vmx_vcpu_getstate(vcpu);
2807 }
2808
2809 static void
2810 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2811 {
2812 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2813 vcpu->comm->state_commit = 0;
2814 vmx_vcpu_setstate(vcpu);
2815 }
2816
2817 /* -------------------------------------------------------------------------- */
2818
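/*
 * VPID ("ASID") allocation, one bit per VPID in a global bitmap; for
 * example VPID 13 lives in octet 13 / 8 = 1, bit 13 % 8 = 5. VPID 0 is
 * reserved for the host.
 */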
2819 static void
2820 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2821 {
2822 struct vmx_cpudata *cpudata = vcpu->cpudata;
2823 size_t i, oct, bit;
2824
2825 mutex_enter(&vmx_asidlock);
2826
2827 for (i = 0; i < vmx_maxasid; i++) {
2828 oct = i / 8;
2829 bit = i % 8;
2830
2831 if (vmx_asidmap[oct] & __BIT(bit)) {
2832 continue;
2833 }
2834
2835 cpudata->asid = i;
2836
2837 vmx_asidmap[oct] |= __BIT(bit);
2838 vmx_vmwrite(VMCS_VPID, i);
2839 mutex_exit(&vmx_asidlock);
2840 return;
2841 }
2842
2843 mutex_exit(&vmx_asidlock);
2844
2845 panic("%s: impossible", __func__);
2846 }
2847
2848 static void
2849 vmx_asid_free(struct nvmm_cpu *vcpu)
2850 {
2851 size_t oct, bit;
2852 uint64_t asid;
2853
2854 asid = vmx_vmread(VMCS_VPID);
2855
2856 oct = asid / 8;
2857 bit = asid % 8;
2858
2859 mutex_enter(&vmx_asidlock);
2860 vmx_asidmap[oct] &= ~__BIT(bit);
2861 mutex_exit(&vmx_asidlock);
2862 }
2863
2864 static void
2865 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2866 {
2867 struct vmx_cpudata *cpudata = vcpu->cpudata;
2868 struct vmcs *vmcs = cpudata->vmcs;
2869 struct msr_entry *gmsr = cpudata->gmsr;
2870 extern uint8_t vmx_resume_rip;
2871 uint64_t rev, eptp;
2872
2873 rev = vmx_get_revision();
2874
2875 memset(vmcs, 0, VMCS_SIZE);
2876 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
2877 vmcs->abort = 0;
2878
2879 vmx_vmcs_enter(vcpu);
2880
2881 /* No link pointer. */
2882 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
2883
2884 /* Install the CTLSs. */
2885 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
2886 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
2887 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
2888 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
2889 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
2890
2891 /* Allow direct access to certain MSRs. */
2892 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2893 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
2894 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2895 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2896 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2897 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2898 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2899 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2900 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2901 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2902 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2903 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2904 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2905 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
2906
2907 /*
2908 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
2909 * includes the L1D_FLUSH MSR, to mitigate L1TF.
2910 */
2911 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
2912 gmsr[VMX_MSRLIST_STAR].val = 0;
2913 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
2914 gmsr[VMX_MSRLIST_LSTAR].val = 0;
2915 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
2916 gmsr[VMX_MSRLIST_CSTAR].val = 0;
2917 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
2918 gmsr[VMX_MSRLIST_SFMASK].val = 0;
2919 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
2920 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
2921 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
2922 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
2923 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
2924 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
2925 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
2926 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
2927
2928 /* Set the CR0 mask. Any change of these bits causes a VMEXIT. */
2929 vmx_vmwrite(VMCS_CR0_MASK, CR0_STATIC_MASK);
2930
2931 /* Force unsupported CR4 fields to zero. */
2932 vmx_vmwrite(VMCS_CR4_MASK, CR4_INVALID);
2933 vmx_vmwrite(VMCS_CR4_SHADOW, 0);
2934
2935 /* Set the Host state for resuming. */
2936 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
2937 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
2938 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2939 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2940 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2941 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
2942 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
2943 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
2944 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
2945 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
2946 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
2947 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
2948 vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS);
2949
2950 /* Generate ASID. */
2951 vmx_asid_alloc(vcpu);
2952
2953 	/*
	 * Enable Extended Page Tables (EPT). The EPTP holds the paging
	 * memory type, the walk length minus one (4-level here), the A/D
	 * flag when supported, and the physical address of the PML4.
	 */
2954 eptp =
2955 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
2956 __SHIFTIN(4-1, EPTP_WALKLEN) |
2957 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
2958 mach->vm->vm_map.pmap->pm_pdirpa[0];
2959 vmx_vmwrite(VMCS_EPTP, eptp);
2960
2961 /* Init IA32_MISC_ENABLE. */
2962 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
2963 cpudata->gmsr_misc_enable &=
2964 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
2965 cpudata->gmsr_misc_enable |=
2966 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
2967
2968 /* Init XSAVE header. */
2969 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2970 cpudata->gfpu.xsh_xcomp_bv = 0;
2971
2972 /* These MSRs are static. */
2973 cpudata->star = rdmsr(MSR_STAR);
2974 cpudata->lstar = rdmsr(MSR_LSTAR);
2975 cpudata->cstar = rdmsr(MSR_CSTAR);
2976 cpudata->sfmask = rdmsr(MSR_SFMASK);
2977
2978 /* Install the RESET state. */
2979 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
2980 sizeof(nvmm_x86_reset_state));
2981 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
2982 vcpu->comm->state_cached = 0;
2983 vmx_vcpu_setstate(vcpu);
2984
2985 vmx_vmcs_leave(vcpu);
2986 }
2987
2988 static int
2989 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2990 {
2991 struct vmx_cpudata *cpudata;
2992 int error;
2993
2994 /* Allocate the VMX cpudata. */
2995 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
2996 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2997 UVM_KMF_WIRED|UVM_KMF_ZERO);
2998 vcpu->cpudata = cpudata;
2999
3000 /* VMCS */
3001 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
3002 VMCS_NPAGES);
3003 if (error)
3004 goto error;
3005
3006 /* MSR Bitmap */
3007 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
3008 MSRBM_NPAGES);
3009 if (error)
3010 goto error;
3011
3012 /* Guest MSR List */
3013 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
3014 if (error)
3015 goto error;
3016
3017 kcpuset_create(&cpudata->htlb_want_flush, true);
3018
3019 /* Init the VCPU info. */
3020 vmx_vcpu_init(mach, vcpu);
3021
3022 return 0;
3023
3024 error:
3025 if (cpudata->vmcs_pa) {
3026 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
3027 VMCS_NPAGES);
3028 }
3029 if (cpudata->msrbm_pa) {
3030 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
3031 MSRBM_NPAGES);
3032 }
3033 if (cpudata->gmsr_pa) {
3034 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3035 }
3036
3037 	/* Allocated with uvm_km_alloc above, so free it the same way. */
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3038 return error;
3039 }
3040
3041 static void
3042 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3043 {
3044 struct vmx_cpudata *cpudata = vcpu->cpudata;
3045
3046 vmx_vmcs_enter(vcpu);
3047 vmx_asid_free(vcpu);
3048 vmx_vmcs_destroy(vcpu);
3049
3050 kcpuset_destroy(cpudata->htlb_want_flush);
3051
3052 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
3053 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
3054 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3055 uvm_km_free(kernel_map, (vaddr_t)cpudata,
3056 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
3057 }
3058
3059 /* -------------------------------------------------------------------------- */
3060
3061 static int
3062 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
3063 {
3064 struct nvmm_vcpu_conf_cpuid *cpuid = data;
3065 size_t i;
3066
3067 if (__predict_false(cpuid->mask && cpuid->exit)) {
3068 return EINVAL;
3069 }
3070 if (__predict_false(cpuid->mask &&
3071 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
3072 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
3073 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
3074 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
3075 return EINVAL;
3076 }
3077
3078 /* If unset, delete, to restore the default behavior. */
3079 if (!cpuid->mask && !cpuid->exit) {
3080 for (i = 0; i < VMX_NCPUIDS; i++) {
3081 if (!cpudata->cpuidpresent[i]) {
3082 continue;
3083 }
3084 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3085 cpudata->cpuidpresent[i] = false;
3086 }
3087 }
3088 return 0;
3089 }
3090
3091 /* If already here, replace. */
3092 for (i = 0; i < VMX_NCPUIDS; i++) {
3093 if (!cpudata->cpuidpresent[i]) {
3094 continue;
3095 }
3096 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3097 memcpy(&cpudata->cpuid[i], cpuid,
3098 sizeof(struct nvmm_vcpu_conf_cpuid));
3099 return 0;
3100 }
3101 }
3102
3103 /* Not here, insert. */
3104 for (i = 0; i < VMX_NCPUIDS; i++) {
3105 if (!cpudata->cpuidpresent[i]) {
3106 cpudata->cpuidpresent[i] = true;
3107 memcpy(&cpudata->cpuid[i], cpuid,
3108 sizeof(struct nvmm_vcpu_conf_cpuid));
3109 return 0;
3110 }
3111 }
3112
3113 return ENOBUFS;
3114 }
3115
3116 static int
3117 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
3118 {
3119 struct nvmm_vcpu_conf_tpr *tpr = data;
3120
3121 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
3122 return 0;
3123 }
3124
3125 static int
3126 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
3127 {
3128 struct vmx_cpudata *cpudata = vcpu->cpudata;
3129
3130 switch (op) {
3131 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
3132 return vmx_vcpu_configure_cpuid(cpudata, data);
3133 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
3134 return vmx_vcpu_configure_tpr(cpudata, data);
3135 default:
3136 return EINVAL;
3137 }
3138 }
3139
3140 /* -------------------------------------------------------------------------- */
3141
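/*
 * pmap hook, called whenever host-side EPT mappings are modified.
 * Bumping the machine generation makes every VCPU flush the EPT cache
 * on its next entry (see vmx_htlb_flush()).
 */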
3142 static void
3143 vmx_tlb_flush(struct pmap *pm)
3144 {
3145 struct nvmm_machine *mach = pm->pm_data;
3146 struct vmx_machdata *machdata = mach->machdata;
3147
3148 atomic_inc_64(&machdata->mach_htlb_gen);
3149
3150 /* Generates IPIs, which cause #VMEXITs. */
3151 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
3152 }
3153
3154 static void
3155 vmx_machine_create(struct nvmm_machine *mach)
3156 {
3157 struct pmap *pmap = mach->vm->vm_map.pmap;
3158 struct vmx_machdata *machdata;
3159
3160 /* Convert to EPT. */
3161 pmap_ept_transform(pmap);
3162
3163 /* Fill in pmap info. */
3164 pmap->pm_data = (void *)mach;
3165 pmap->pm_tlb_flush = vmx_tlb_flush;
3166
3167 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
3168 mach->machdata = machdata;
3169
3170 /* Start with an hTLB flush everywhere. */
3171 machdata->mach_htlb_gen = 1;
3172 }
3173
3174 static void
3175 vmx_machine_destroy(struct nvmm_machine *mach)
3176 {
3177 struct vmx_machdata *machdata = mach->machdata;
3178
3179 kmem_free(machdata, sizeof(struct vmx_machdata));
3180 }
3181
3182 static int
3183 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
3184 {
3185 panic("%s: impossible", __func__);
3186 }
3187
3188 /* -------------------------------------------------------------------------- */
3189
3190 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
3191 	(((msrval) & __BIT(32 + (bitoff))) != 0)
3192 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
3193 	(((msrval) & __BIT(bitoff)) == 0)
3194
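/*
 * The VMX capability MSRs encode, for each control, whether it is
 * allowed to be 0 and whether it is allowed to be 1: bit N of the low
 * word must be 0 for the control to be clearable, and bit 32+N of the
 * high word must be 1 for it to be settable. The TRUE_* variants, when
 * advertised in IA32_VMX_BASIC, report the real constraints without
 * forcing the default1 class of controls to 1.
 */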
3195 static int
3196 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
3197 {
3198 uint64_t basic, val, true_val;
3199 bool has_true;
3200 size_t i;
3201
3202 basic = rdmsr(MSR_IA32_VMX_BASIC);
3203 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3204
3205 val = rdmsr(msr_ctls);
3206 if (has_true) {
3207 true_val = rdmsr(msr_true_ctls);
3208 } else {
3209 true_val = val;
3210 }
3211
3212 for (i = 0; i < 32; i++) {
3213 if (!(set_one & __BIT(i))) {
3214 continue;
3215 }
3216 if (!CTLS_ONE_ALLOWED(true_val, i)) {
3217 return -1;
3218 }
3219 }
3220
3221 return 0;
3222 }
3223
3224 static int
3225 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
3226 uint64_t set_one, uint64_t set_zero, uint64_t *res)
3227 {
3228 uint64_t basic, val, true_val;
3229 bool one_allowed, zero_allowed, has_true;
3230 size_t i;
3231
3232 basic = rdmsr(MSR_IA32_VMX_BASIC);
3233 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3234
3235 val = rdmsr(msr_ctls);
3236 if (has_true) {
3237 true_val = rdmsr(msr_true_ctls);
3238 } else {
3239 true_val = val;
3240 }
3241
3242 for (i = 0; i < 32; i++) {
3243 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3244 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3245
3246 if (zero_allowed && !one_allowed) {
3247 if (set_one & __BIT(i))
3248 return -1;
3249 *res &= ~__BIT(i);
3250 } else if (one_allowed && !zero_allowed) {
3251 if (set_zero & __BIT(i))
3252 return -1;
3253 *res |= __BIT(i);
3254 } else {
3255 if (set_zero & __BIT(i)) {
3256 *res &= ~__BIT(i);
3257 } else if (set_one & __BIT(i)) {
3258 *res |= __BIT(i);
3259 } else if (!has_true) {
3260 *res &= ~__BIT(i);
3261 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3262 *res &= ~__BIT(i);
3263 } else if (CTLS_ONE_ALLOWED(val, i)) {
3264 *res |= __BIT(i);
3265 } else {
3266 return -1;
3267 }
3268 }
3269 }
3270
3271 return 0;
3272 }
3273
3274 static bool
3275 vmx_ident(void)
3276 {
3277 uint64_t msr;
3278 int ret;
3279
3280 if (!(cpu_feature[1] & CPUID2_VMX)) {
3281 return false;
3282 }
3283
3284 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3285 if ((msr & IA32_FEATURE_CONTROL_LOCK) != 0 &&
3286 (msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3287 printf("NVMM: VMX disabled in BIOS\n");
3288 return false;
3289 }
3290
3291 msr = rdmsr(MSR_IA32_VMX_BASIC);
3292 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3293 printf("NVMM: I/O reporting not supported\n");
3294 return false;
3295 }
3296 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3297 printf("NVMM: WB memory not supported\n");
3298 return false;
3299 }
3300
3301 	/* PG and PE are reported, even if Unrestricted Guest is supported. */
3302 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3303 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3304 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3305 if (ret == -1) {
3306 printf("NVMM: CR0 requirements not satisfied\n");
3307 return false;
3308 }
3309
3310 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3311 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3312 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
3313 if (ret == -1) {
3314 printf("NVMM: CR4 requirements not satisfied\n");
3315 return false;
3316 }
3317
3318 /* Init the CTLSs right now, and check for errors. */
3319 ret = vmx_init_ctls(
3320 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3321 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3322 &vmx_pinbased_ctls);
3323 if (ret == -1) {
3324 printf("NVMM: pin-based-ctls requirements not satisfied\n");
3325 return false;
3326 }
3327 ret = vmx_init_ctls(
3328 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3329 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3330 &vmx_procbased_ctls);
3331 if (ret == -1) {
3332 printf("NVMM: proc-based-ctls requirements not satisfied\n");
3333 return false;
3334 }
3335 ret = vmx_init_ctls(
3336 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3337 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3338 &vmx_procbased_ctls2);
3339 if (ret == -1) {
3340 printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
3341 return false;
3342 }
3343 ret = vmx_check_ctls(
3344 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3345 PROC_CTLS2_INVPCID_ENABLE);
3346 if (ret != -1) {
3347 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3348 }
3349 ret = vmx_init_ctls(
3350 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3351 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3352 &vmx_entry_ctls);
3353 if (ret == -1) {
3354 printf("NVMM: entry-ctls requirements not satisfied\n");
3355 return false;
3356 }
3357 ret = vmx_init_ctls(
3358 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3359 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3360 &vmx_exit_ctls);
3361 if (ret == -1) {
3362 printf("NVMM: exit-ctls requirements not satisfied\n");
3363 return false;
3364 }
3365
3366 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3367 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3368 printf("NVMM: 4-level page tree not supported\n");
3369 return false;
3370 }
3371 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3372 printf("NVMM: INVEPT not supported\n");
3373 return false;
3374 }
3375 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3376 printf("NVMM: INVVPID not supported\n");
3377 return false;
3378 }
3379 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3380 pmap_ept_has_ad = true;
3381 } else {
3382 pmap_ept_has_ad = false;
3383 }
3384 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3385 printf("NVMM: EPT UC/WB memory types not supported\n");
3386 return false;
3387 }
3388
3389 return true;
3390 }
3391
3392 static void
3393 vmx_init_asid(uint32_t maxasid)
3394 {
3395 size_t allocsz;
3396
3397 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
3398
3399 vmx_maxasid = maxasid;
3400 allocsz = roundup(maxasid, 8) / 8;
3401 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
3402
3403 /* ASID 0 is reserved for the host. */
3404 vmx_asidmap[0] |= __BIT(0);
3405 }
3406
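/*
 * Cross-call handler to enable or disable VMX on a CPU. Enabling
 * requires the IA32_FEATURE_CONTROL lock bit with VMX-outside-SMX
 * allowed, then CR4.VMXE, then VMXON with a per-CPU VMXON region.
 * Disabling is done in the reverse order: VMXOFF first, then CR4.VMXE
 * is cleared.
 */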
3407 static void
3408 vmx_change_cpu(void *arg1, void *arg2)
3409 {
3410 struct cpu_info *ci = curcpu();
3411 bool enable = arg1 != NULL;
3412 uint64_t msr, cr4;
3413
3414 if (enable) {
3415 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3416 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3417 /* Lock now, with VMX-outside-SMX enabled. */
3418 wrmsr(MSR_IA32_FEATURE_CONTROL, msr |
3419 IA32_FEATURE_CONTROL_LOCK |
3420 IA32_FEATURE_CONTROL_OUT_SMX);
3421 }
3422 }
3423
3424 if (!enable) {
3425 vmx_vmxoff();
3426 }
3427
3428 cr4 = rcr4();
3429 if (enable) {
3430 cr4 |= CR4_VMXE;
3431 } else {
3432 cr4 &= ~CR4_VMXE;
3433 }
3434 lcr4(cr4);
3435
3436 if (enable) {
3437 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
3438 }
3439 }
3440
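/*
 * L1TF mitigation: if the CPU advertises that an L1D flush on VMENTRY
 * is not needed, do nothing. Otherwise, if the L1D_FLUSH command is
 * available, extend the entry MSR-load list by one slot so that
 * IA32_FLUSH_CMD gets written on each VMENTRY.
 */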
3441 static void
3442 vmx_init_l1tf(void)
3443 {
3444 u_int descs[4];
3445 uint64_t msr;
3446
3447 if (cpuid_level < 7) {
3448 return;
3449 }
3450
3451 x86_cpuid(7, descs);
3452
3453 if (descs[3] & CPUID_SEF_ARCH_CAP) {
3454 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3455 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3456 /* No mitigation needed. */
3457 return;
3458 }
3459 }
3460
3461 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
3462 /* Enable hardware mitigation. */
3463 vmx_msrlist_entry_nmsr += 1;
3464 }
3465 }
3466
3467 static void
3468 vmx_init(void)
3469 {
3470 CPU_INFO_ITERATOR cii;
3471 struct cpu_info *ci;
3472 uint64_t xc, msr;
3473 struct vmxon *vmxon;
3474 uint32_t revision;
3475 u_int descs[4];
3476 paddr_t pa;
3477 vaddr_t va;
3478 int error;
3479
3480 /* Init the ASID bitmap (VPID). */
3481 vmx_init_asid(VPID_MAX);
3482
3483 /* Init the XCR0 mask. */
3484 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3485
3486 /* Init the max basic CPUID leaf. */
3487 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC);
3488
3489 /* Init the max extended CPUID leaf. */
3490 x86_cpuid(0x80000000, descs);
3491 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED);
3492
3493 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3494 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3495 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3496 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3497 } else {
3498 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3499 }
3500 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3501 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3502 } else {
3503 vmx_ept_flush_op = VMX_INVEPT_ALL;
3504 }
3505 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3506 vmx_eptp_type = EPTP_TYPE_WB;
3507 } else {
3508 vmx_eptp_type = EPTP_TYPE_UC;
3509 }
3510
3511 /* Init the L1TF mitigation. */
3512 vmx_init_l1tf();
3513
3514 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3515 revision = vmx_get_revision();
3516
3517 for (CPU_INFO_FOREACH(cii, ci)) {
3518 error = vmx_memalloc(&pa, &va, 1);
3519 if (error) {
3520 panic("%s: out of memory", __func__);
3521 }
3522 vmxoncpu[cpu_index(ci)].pa = pa;
3523 vmxoncpu[cpu_index(ci)].va = va;
3524
3525 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
3526 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3527 }
3528
3529 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
3530 xc_wait(xc);
3531 }
3532
3533 static void
3534 vmx_fini_asid(void)
3535 {
3536 size_t allocsz;
3537
3538 allocsz = roundup(vmx_maxasid, 8) / 8;
3539 kmem_free(vmx_asidmap, allocsz);
3540
3541 mutex_destroy(&vmx_asidlock);
3542 }
3543
3544 static void
3545 vmx_fini(void)
3546 {
3547 uint64_t xc;
3548 size_t i;
3549
3550 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
3551 xc_wait(xc);
3552
3553 for (i = 0; i < MAXCPUS; i++) {
3554 if (vmxoncpu[i].pa != 0)
3555 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3556 }
3557
3558 vmx_fini_asid();
3559 }
3560
3561 static void
3562 vmx_capability(struct nvmm_capability *cap)
3563 {
3564 cap->arch.mach_conf_support = 0;
3565 cap->arch.vcpu_conf_support =
3566 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3567 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3568 cap->arch.xcr0_mask = vmx_xcr0_mask;
3569 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3570 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3571 }
3572
3573 const struct nvmm_impl nvmm_x86_vmx = {
3574 .name = "x86-vmx",
3575 .ident = vmx_ident,
3576 .init = vmx_init,
3577 .fini = vmx_fini,
3578 .capability = vmx_capability,
3579 .mach_conf_max = NVMM_X86_MACH_NCONF,
3580 .mach_conf_sizes = NULL,
3581 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3582 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3583 .state_size = sizeof(struct nvmm_x64_state),
3584 .machine_create = vmx_machine_create,
3585 .machine_destroy = vmx_machine_destroy,
3586 .machine_configure = vmx_machine_configure,
3587 .vcpu_create = vmx_vcpu_create,
3588 .vcpu_destroy = vmx_vcpu_destroy,
3589 .vcpu_configure = vmx_vcpu_configure,
3590 .vcpu_setstate = vmx_vcpu_setstate,
3591 .vcpu_getstate = vmx_vcpu_getstate,
3592 .vcpu_inject = vmx_vcpu_inject,
3593 .vcpu_run = vmx_vcpu_run
3594 };
3595