/*	$NetBSD: nvmm_x86_vmx.c,v 1.71 2020/08/20 11:09:56 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.71 2020/08/20 11:09:56 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <sys/bitops.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

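/*
 * VMX instructions report failure through the flags: CF=1 means
 * VMfailInvalid, ZF=1 means VMfailValid (an error number is then
 * available in the current VMCS). The jz/jc pairs below branch to
 * the vmx_insn_failvalid/vmx_insn_failinvalid panic stubs, provided
 * by the companion assembly code.
 */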
static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	asm volatile (
		"invept %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	asm volatile (
		"invvpid %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	asm volatile (
		"vmread %[field],%[value];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	asm volatile (
		"vmwrite %[value],%[field];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

#ifdef DIAGNOSTIC
static inline paddr_t
vmx_vmptrst(void)
{
	paddr_t pa;

	asm volatile (
		"vmptrst %[pa];"
		: [pa] "=m" (pa)
		:
		: "memory"
	);

	return pa;
}
#endif

static inline void
vmx_vmptrld(paddr_t *pa)
{
	asm volatile (
		"vmptrld %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	asm volatile (
		"vmclear %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_cli(void)
{
	asm volatile ("cli" ::: "memory");
}

static inline void
vmx_sti(void)
{
	asm volatile ("sti" ::: "memory");
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)
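/*
 * VMXON requires the lock bit to be set in this MSR, together with
 * the OUT_SMX (resp. IN_SMX) enable bit matching the current mode;
 * otherwise the instruction raises #GP.
 */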

#define MSR_IA32_VMX_BASIC		0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS		0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
#define MSR_IA32_VMX_EXIT_CTLS			0x0483
#define MSR_IA32_VMX_ENTRY_CTLS			0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0			0x0486
#define MSR_IA32_VMX_CR0_FIXED1			0x0487
#define MSR_IA32_VMX_CR4_FIXED0			0x0488
#define MSR_IA32_VMX_CR4_FIXED1			0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_XO			__BIT(0)
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_2MB			__BIT(16)
#define		IA32_VMX_EPT_VPID_1GB			__BIT(17)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO	__BIT(22)
#define		IA32_VMX_EPT_VPID_SHSTK			__BIT(23)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_SSS			__BIT(7)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
#define VMCS_TSC_MULTIPLIER			0x00002032
#define VMCS_ENCLV_EXIT_BITMAP			0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
#define VMCS_GUEST_RTIT_CTL			0x00002814
#define VMCS_GUEST_PKRS				0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
#define VMCS_HOST_IA32_PKRS			0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define		EXIT_CTLS_CLEAR_RTIT_CTL	__BIT(25)
#define		EXIT_CTLS_LOAD_CET		__BIT(28)
#define		EXIT_CTLS_LOAD_PKRS		__BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define		ENTRY_CTLS_LOAD_RTIT_CTL	__BIT(18)
#define		ENTRY_CTLS_LOAD_CET		__BIT(20)
#define		ENTRY_CTLS_LOAD_PKRS		__BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH		0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define		PROC_CTLS2_PT_USES_GPA		__BIT(24)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define		PROC_CTLS2_WAIT_PAUSE_ENABLE	__BIT(26)
#define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
#define VMCS_GUEST_IA32_S_CET			0x00006828
#define VMCS_GUEST_SSP				0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE		0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16
#define VMCS_HOST_IA32_S_CET			0x00006C18
#define VMCS_HOST_SSP				0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE		0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
#define VMCS_EXITCODE_SPP			66
#define VMCS_EXITCODE_UMWAIT			67
#define VMCS_EXITCODE_TPAUSE			68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5
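/*
 * Note: the L1D-flush entry, when used, sits right after the last
 * common MSR in the entry-load list, which is why it shares the
 * index 5 with VMX_MSRLIST_EXIT_NMSR.
 */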

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

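/*
 * For each control word below, the _ONE set lists the bits we require
 * to be settable to 1, and the _ZERO set the bits we require to be
 * clearable to 0. Both sets are verified against the (TRUE_)CTLS
 * capability MSRs when the driver identifies the CPU.
 */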
#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)
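/*
 * Changes to any of the bits listed above can affect address
 * translation, so the state-setting path flushes the guest TLB
 * whenever it toggles one of them.
 */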

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	struct cpu_info *vmcs_ci;
	bool vmcs_launched;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};
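/*
 * Lookup sketch: the state code indexes this table to translate an
 * NVMM segment into its VMCS fields, e.g.
 *	vmx_vmwrite(vmx_guest_segs[idx].selector, seg->selector);
 * GDTR and IDTR have no selector or attributes, hence the zero
 * ("doesn't exist") entries above.
 */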

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
	paddr_t vmcs_pa = (paddr_t)arg1;
	vmx_vmclear(&vmcs_pa);
}

static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
	uint64_t xc;
	int bound;

	KASSERT(kpreempt_disabled());

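	/*
	 * xc_wait() can sleep, so preemption must be re-enabled around
	 * the cross-call; binding the LWP keeps us on the same physical
	 * CPU in the meantime.
	 */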
	bound = curlwp_bind();
	kpreempt_enable();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
	xc_wait(xc);

	kpreempt_disable();
	curlwp_bindx(bound);
}

static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct cpu_info *vmcs_ci;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		KASSERT(kpreempt_disabled());
		KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
		return;
	}

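	/*
	 * Snapshot the owner CPU, and poison the field with a
	 * non-canonical pointer so that any stale use faults loudly.
	 */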
	vmcs_ci = cpudata->vmcs_ci;
	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	kpreempt_disable();

	if (vmcs_ci == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_ci != curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_ci = curcpu();
	kpreempt_enable();
}
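/*
 * Typical usage pattern (sketch): enter pins the VMCS on the local
 * CPU with preemption disabled, vmx_vmread/vmx_vmwrite then operate
 * on it, and leave unpins it:
 *	vmx_vmcs_enter(vcpu);
 *	rip = vmx_vmread(VMCS_GUEST_RIP);
 *	vmx_vmcs_leave(vcpu);
 */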

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline int
vmx_event_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	u_int evtype;
	uint8_t vector;
	uint64_t info, error;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		type = INTR_TYPE_HW_EXC;
		err = vmx_event_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_VCPU_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

#define VMX_CPUID_MAX_BASIC		0x16
#define VMX_CPUID_MAX_HYPERVISOR	0x40000000
#define VMX_CPUID_MAX_EXTENDED		0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}

static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	unsigned int ncpus;
	uint64_t cr4;

	if (eax < 0x40000000) {
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002:
		break;
	case 0x00000003:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		break; /* TODO? */
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = atomic_load_relaxed(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
			     CPUID_PES1_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;

	default:
		break;
	}
}

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr0 = gpr | CR0_NE | CR0_ET;
	cr0 &= ~(CR0_NW|CR0_CD);

	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

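	/*
	 * Keep EFER.LMA and the "IA-32e mode guest" entry control in
	 * sync with CR0.PG and EFER.LME, as the architecture requires.
	 */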
	if (cr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	vmx_vmwrite(VMCS_GUEST_CR0, cr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr4 = gpr | CR4_VMXE;

	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
		if (cpudata->tpr.exit_changed) {
			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;
	int ret;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}
}

#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

	exit->reason = NVMM_VCPU_EXIT_IO;

	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if (exit->u.io.in && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.io.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

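/* Reads of these MSRs return zero, and writes to them are dropped. */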
1740 static const uint64_t msr_ignore_list[] = {
1741 MSR_BIOS_SIGN,
1742 MSR_IA32_PLATFORM_ID
1743 };
1744
1745 static bool
1746 vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1747 struct nvmm_vcpu_exit *exit)
1748 {
1749 struct vmx_cpudata *cpudata = vcpu->cpudata;
1750 uint64_t val;
1751 size_t i;
1752
1753 if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
1754 if (exit->u.rdmsr.msr == MSR_CR_PAT) {
1755 val = vmx_vmread(VMCS_GUEST_IA32_PAT);
1756 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1757 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1758 goto handled;
1759 }
1760 if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
1761 val = cpudata->gmsr_misc_enable;
1762 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1763 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1764 goto handled;
1765 }
1766 if (exit->u.rdmsr.msr == MSR_IA32_ARCH_CAPABILITIES) {
1767 u_int descs[4];
1768 if (cpuid_level < 7) {
1769 goto error;
1770 }
1771 x86_cpuid(7, descs);
1772 if (!(descs[3] & CPUID_SEF_ARCH_CAP)) {
1773 goto error;
1774 }
1775 val = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
1776 val &= (IA32_ARCH_RDCL_NO |
1777 IA32_ARCH_SSB_NO |
1778 IA32_ARCH_MDS_NO |
1779 IA32_ARCH_TAA_NO);
1780 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1781 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1782 goto handled;
1783 }
1784 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1785 if (msr_ignore_list[i] != exit->u.rdmsr.msr)
1786 continue;
1787 val = 0;
1788 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1789 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1790 goto handled;
1791 }
1792 } else {
1793 if (exit->u.wrmsr.msr == MSR_TSC) {
1794 cpudata->gtsc = exit->u.wrmsr.val;
1795 cpudata->gtsc_want_update = true;
1796 goto handled;
1797 }
1798 if (exit->u.wrmsr.msr == MSR_CR_PAT) {
1799 val = exit->u.wrmsr.val;
1800 if (__predict_false(!nvmm_x86_pat_validate(val))) {
1801 goto error;
1802 }
1803 vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
1804 goto handled;
1805 }
1806 if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
1807 /* Don't care. */
1808 goto handled;
1809 }
1810 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1811 if (msr_ignore_list[i] != exit->u.wrmsr.msr)
1812 continue;
1813 goto handled;
1814 }
1815 }
1816
1817 return false;
1818
1819 handled:
1820 vmx_inkernel_advance();
1821 return true;
1822
1823 error:
1824 vmx_inject_gp(vcpu);
1825 return true;
1826 }
1827
1828 static void
1829 vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1830 struct nvmm_vcpu_exit *exit)
1831 {
1832 struct vmx_cpudata *cpudata = vcpu->cpudata;
1833 uint64_t inslen, rip;
1834
1835 exit->reason = NVMM_VCPU_EXIT_RDMSR;
1836 exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1837
1838 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1839 exit->reason = NVMM_VCPU_EXIT_NONE;
1840 return;
1841 }
1842
1843 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1844 rip = vmx_vmread(VMCS_GUEST_RIP);
1845 exit->u.rdmsr.npc = rip + inslen;
1846
1847 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1848 }
1849
1850 static void
1851 vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1852 struct nvmm_vcpu_exit *exit)
1853 {
1854 struct vmx_cpudata *cpudata = vcpu->cpudata;
1855 uint64_t rdx, rax, inslen, rip;
1856
1857 rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
1858 rax = cpudata->gprs[NVMM_X64_GPR_RAX];
1859
1860 exit->reason = NVMM_VCPU_EXIT_WRMSR;
1861 exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1862 exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
1863
1864 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1865 exit->reason = NVMM_VCPU_EXIT_NONE;
1866 return;
1867 }
1868
1869 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1870 rip = vmx_vmread(VMCS_GUEST_RIP);
1871 exit->u.wrmsr.npc = rip + inslen;
1872
1873 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1874 }
1875
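/*
 * XSETBV: only XCR0 (ECX=0) may be written, the value must stay within
 * the host-supported mask, and the x87 bit must remain set; any other
 * combination raises #GP, as on real hardware.
 */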
1876 static void
1877 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1878 struct nvmm_vcpu_exit *exit)
1879 {
1880 struct vmx_cpudata *cpudata = vcpu->cpudata;
1881 uint64_t val;
1882
1883 exit->reason = NVMM_VCPU_EXIT_NONE;
1884
1885 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
1886 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
1887
1888 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1889 goto error;
1890 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
1891 goto error;
1892 } else if (__predict_false((val & XCR0_X87) == 0)) {
1893 goto error;
1894 }
1895
1896 cpudata->gxcr0 = val;
1897 if (vmx_xcr0_mask != 0) {
1898 wrxcr(0, cpudata->gxcr0);
1899 }
1900
1901 vmx_inkernel_advance();
1902 return;
1903
1904 error:
1905 vmx_inject_gp(vcpu);
1906 }
1907
1908 #define VMX_EPT_VIOLATION_READ __BIT(0)
1909 #define VMX_EPT_VIOLATION_WRITE __BIT(1)
1910 #define VMX_EPT_VIOLATION_EXECUTE __BIT(2)
1911
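/*
 * EPT violation. Bits 0-2 of the exit qualification give the access type
 * that faulted (SDM Vol. 3, "Exit Qualification for EPT Violations");
 * reduce it to a single NVMM protection bit, write taking precedence
 * over execute.
 */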
1912 static void
1913 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1914 struct nvmm_vcpu_exit *exit)
1915 {
1916 uint64_t perm;
1917 gpaddr_t gpa;
1918
1919 gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
1920
1921 exit->reason = NVMM_VCPU_EXIT_MEMORY;
1922 perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1923 if (perm & VMX_EPT_VIOLATION_WRITE)
1924 exit->u.mem.prot = PROT_WRITE;
1925 else if (perm & VMX_EPT_VIOLATION_EXECUTE)
1926 exit->u.mem.prot = PROT_EXEC;
1927 else
1928 exit->u.mem.prot = PROT_READ;
1929 exit->u.mem.gpa = gpa;
1930 exit->u.mem.inst_len = 0;
1931
1932 vmx_vcpu_state_provide(vcpu,
1933 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
1934 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
1935 }
1936
1937 /* -------------------------------------------------------------------------- */
1938
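/*
 * VMX does not context-switch the x87/XSAVE state or XCR0; swap them
 * between host and guest by hand around guest execution.
 */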
1939 static void
1940 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
1941 {
1942 struct vmx_cpudata *cpudata = vcpu->cpudata;
1943
1944 fpu_kern_enter();
1945 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
1946
1947 if (vmx_xcr0_mask != 0) {
1948 cpudata->hxcr0 = rdxcr(0);
1949 wrxcr(0, cpudata->gxcr0);
1950 }
1951 }
1952
1953 static void
1954 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
1955 {
1956 struct vmx_cpudata *cpudata = vcpu->cpudata;
1957
1958 if (vmx_xcr0_mask != 0) {
1959 cpudata->gxcr0 = rdxcr(0);
1960 wrxcr(0, cpudata->hxcr0);
1961 }
1962
1963 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
1964 fpu_kern_leave();
1965 }
1966
1967 static void
1968 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
1969 {
1970 struct vmx_cpudata *cpudata = vcpu->cpudata;
1971
1972 x86_dbregs_save(curlwp);
1973
1974 ldr7(0);
1975
1976 ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
1977 ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
1978 ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
1979 ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
1980 ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
1981 }
1982
1983 static void
1984 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
1985 {
1986 struct vmx_cpudata *cpudata = vcpu->cpudata;
1987
1988 cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
1989 cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
1990 cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
1991 cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
1992 cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
1993
1994 x86_dbregs_restore(curlwp);
1995 }
1996
1997 static void
1998 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
1999 {
2000 struct vmx_cpudata *cpudata = vcpu->cpudata;
2001
2002 	/* This host state gets reloaded automatically by the CPU on #VMEXIT. */
2003 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)curcpu()->ci_idtvec.iv_idt);
2004 vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
2005 vmx_vmwrite(VMCS_HOST_CR3, rcr3());
2006 vmx_vmwrite(VMCS_HOST_CR4, rcr4());
2007
2008 cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
2009 }
2010
2011 static void
2012 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
2013 {
2014 struct vmx_cpudata *cpudata = vcpu->cpudata;
2015
2016 wrmsr(MSR_STAR, cpudata->star);
2017 wrmsr(MSR_LSTAR, cpudata->lstar);
2018 wrmsr(MSR_CSTAR, cpudata->cstar);
2019 wrmsr(MSR_SFMASK, cpudata->sfmask);
2020 wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
2021 }
2022
2023 /* -------------------------------------------------------------------------- */
2024
2025 #define VMX_INVVPID_ADDRESS 0
2026 #define VMX_INVVPID_CONTEXT 1
2027 #define VMX_INVVPID_ALL 2
2028 #define VMX_INVVPID_CONTEXT_NOGLOBAL 3
2029
2030 #define VMX_INVEPT_CONTEXT 1
2031 #define VMX_INVEPT_ALL 2
2032
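/*
 * Two TLB spaces are tracked: the guest TLB (VPID-tagged linear
 * translations, flushed with INVVPID) and the host TLB (EPT-tagged
 * combined translations, flushed with INVEPT). A VCPU migrating to a
 * different hcpu must catch up, since that CPU may hold stale entries.
 */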
2033 static inline void
2034 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2035 {
2036 struct vmx_cpudata *cpudata = vcpu->cpudata;
2037
2038 if (vcpu->hcpu_last != hcpu) {
2039 cpudata->gtlb_want_flush = true;
2040 }
2041 }
2042
2043 static inline void
2044 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2045 {
2046 struct vmx_cpudata *cpudata = vcpu->cpudata;
2047 struct ept_desc ept_desc;
2048
2049 if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
2050 return;
2051 }
2052
2053 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2054 ept_desc.mbz = 0;
2055 vmx_invept(vmx_ept_flush_op, &ept_desc);
2056 kcpuset_clear(cpudata->htlb_want_flush, hcpu);
2057 }
2058
2059 static inline uint64_t
2060 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
2061 {
2062 struct ept_desc ept_desc;
2063 uint64_t machgen;
2064
2065 machgen = machdata->mach_htlb_gen;
2066 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
2067 return machgen;
2068 }
2069
2070 kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
2071
2072 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2073 ept_desc.mbz = 0;
2074 vmx_invept(vmx_ept_flush_op, &ept_desc);
2075
2076 return machgen;
2077 }
2078
2079 static inline void
2080 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
2081 {
2082 cpudata->vcpu_htlb_gen = machgen;
2083 kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
2084 }
2085
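/*
 * If the #VMEXIT interrupted the delivery of an event (valid
 * IDT-vectoring info), re-queue that event for the next VMENTRY so it is
 * not lost. Software-generated events also need the instruction length.
 */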
2086 static inline void
2087 vmx_exit_evt(struct vmx_cpudata *cpudata)
2088 {
2089 uint64_t info, err, inslen;
2090
2091 cpudata->evt_pending = false;
2092
2093 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
2094 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
2095 return;
2096 }
2097 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
2098
2099 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
2100 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
2101
2102 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
2103 case INTR_TYPE_SW_INT:
2104 case INTR_TYPE_PRIV_SW_EXC:
2105 case INTR_TYPE_SW_EXC:
2106 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
2107 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
2108 }
2109
2110 cpudata->evt_pending = true;
2111 }
2112
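/*
 * Main VCPU loop: commit pending events and state, then alternate
 * VMLAUNCH/VMRESUME, dispatching each #VMEXIT. Exits handled in the
 * kernel set NVMM_VCPU_EXIT_NONE and keep the loop spinning; the rest,
 * or a pending return request, break out to userland.
 */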
2113 static int
2114 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2115 struct nvmm_vcpu_exit *exit)
2116 {
2117 struct nvmm_comm_page *comm = vcpu->comm;
2118 struct vmx_machdata *machdata = mach->machdata;
2119 struct vmx_cpudata *cpudata = vcpu->cpudata;
2120 struct vpid_desc vpid_desc;
2121 struct cpu_info *ci;
2122 uint64_t exitcode;
2123 uint64_t intstate;
2124 uint64_t machgen;
2125 int hcpu, ret;
2126 bool launched;
2127
2128 vmx_vmcs_enter(vcpu);
2129
2130 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2131 vmx_vmcs_leave(vcpu);
2132 return EINVAL;
2133 }
2134 vmx_vcpu_state_commit(vcpu);
2135 comm->state_cached = 0;
2136
2137 ci = curcpu();
2138 hcpu = cpu_number();
2139 launched = cpudata->vmcs_launched;
2140
2141 vmx_gtlb_catchup(vcpu, hcpu);
2142 vmx_htlb_catchup(vcpu, hcpu);
2143
2144 if (vcpu->hcpu_last != hcpu) {
2145 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
2146 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
2147 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
2148 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
2149 cpudata->gtsc_want_update = true;
2150 vcpu->hcpu_last = hcpu;
2151 }
2152
2153 vmx_vcpu_guest_dbregs_enter(vcpu);
2154 vmx_vcpu_guest_misc_enter(vcpu);
2155 vmx_vcpu_guest_fpu_enter(vcpu);
2156
2157 while (1) {
2158 if (cpudata->gtlb_want_flush) {
2159 vpid_desc.vpid = cpudata->asid;
2160 vpid_desc.addr = 0;
2161 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2162 cpudata->gtlb_want_flush = false;
2163 }
2164
2165 if (__predict_false(cpudata->gtsc_want_update)) {
2166 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
2167 cpudata->gtsc_want_update = false;
2168 }
2169
2170 vmx_cli();
2171 machgen = vmx_htlb_flush(machdata, cpudata);
2172 lcr2(cpudata->gcr2);
2173 if (launched) {
2174 ret = vmx_vmresume(cpudata->gprs);
2175 } else {
2176 ret = vmx_vmlaunch(cpudata->gprs);
2177 }
2178 cpudata->gcr2 = rcr2();
2179 vmx_htlb_flush_ack(cpudata, machgen);
2180 vmx_sti();
2181
2182 if (__predict_false(ret != 0)) {
2183 vmx_exit_invalid(exit, -1);
2184 break;
2185 }
2186 vmx_exit_evt(cpudata);
2187
2188 launched = true;
2189
2190 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2191 exitcode &= __BITS(15,0);
2192
2193 switch (exitcode) {
2194 case VMCS_EXITCODE_EXC_NMI:
2195 vmx_exit_exc_nmi(mach, vcpu, exit);
2196 break;
2197 case VMCS_EXITCODE_EXT_INT:
2198 exit->reason = NVMM_VCPU_EXIT_NONE;
2199 break;
2200 case VMCS_EXITCODE_CPUID:
2201 vmx_exit_cpuid(mach, vcpu, exit);
2202 break;
2203 case VMCS_EXITCODE_HLT:
2204 vmx_exit_hlt(mach, vcpu, exit);
2205 break;
2206 case VMCS_EXITCODE_CR:
2207 vmx_exit_cr(mach, vcpu, exit);
2208 break;
2209 case VMCS_EXITCODE_IO:
2210 vmx_exit_io(mach, vcpu, exit);
2211 break;
2212 case VMCS_EXITCODE_RDMSR:
2213 vmx_exit_rdmsr(mach, vcpu, exit);
2214 break;
2215 case VMCS_EXITCODE_WRMSR:
2216 vmx_exit_wrmsr(mach, vcpu, exit);
2217 break;
2218 case VMCS_EXITCODE_SHUTDOWN:
2219 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2220 break;
2221 case VMCS_EXITCODE_MONITOR:
2222 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2223 break;
2224 case VMCS_EXITCODE_MWAIT:
2225 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2226 break;
2227 case VMCS_EXITCODE_XSETBV:
2228 vmx_exit_xsetbv(mach, vcpu, exit);
2229 break;
2230 case VMCS_EXITCODE_RDPMC:
2231 case VMCS_EXITCODE_RDTSCP:
2232 case VMCS_EXITCODE_INVVPID:
2233 case VMCS_EXITCODE_INVEPT:
2234 case VMCS_EXITCODE_VMCALL:
2235 case VMCS_EXITCODE_VMCLEAR:
2236 case VMCS_EXITCODE_VMLAUNCH:
2237 case VMCS_EXITCODE_VMPTRLD:
2238 case VMCS_EXITCODE_VMPTRST:
2239 case VMCS_EXITCODE_VMREAD:
2240 case VMCS_EXITCODE_VMRESUME:
2241 case VMCS_EXITCODE_VMWRITE:
2242 case VMCS_EXITCODE_VMXOFF:
2243 case VMCS_EXITCODE_VMXON:
2244 vmx_inject_ud(vcpu);
2245 exit->reason = NVMM_VCPU_EXIT_NONE;
2246 break;
2247 case VMCS_EXITCODE_EPT_VIOLATION:
2248 vmx_exit_epf(mach, vcpu, exit);
2249 break;
2250 case VMCS_EXITCODE_INT_WINDOW:
2251 vmx_event_waitexit_disable(vcpu, false);
2252 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2253 break;
2254 case VMCS_EXITCODE_NMI_WINDOW:
2255 vmx_event_waitexit_disable(vcpu, true);
2256 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2257 break;
2258 default:
2259 vmx_exit_invalid(exit, exitcode);
2260 break;
2261 }
2262
2263 		/* Keep rolling, unless we need to return to userland. */
2264 if (nvmm_return_needed()) {
2265 break;
2266 }
2267 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2268 break;
2269 }
2270 }
2271
2272 cpudata->vmcs_launched = launched;
2273
2274 cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
2275
2276 vmx_vcpu_guest_fpu_leave(vcpu);
2277 vmx_vcpu_guest_misc_leave(vcpu);
2278 vmx_vcpu_guest_dbregs_leave(vcpu);
2279
2280 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2281 exit->exitstate.cr8 = cpudata->gcr8;
2282 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2283 exit->exitstate.int_shadow =
2284 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2285 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2286 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2287 exit->exitstate.evt_pending = cpudata->evt_pending;
2288
2289 vmx_vmcs_leave(vcpu);
2290
2291 return 0;
2292 }
2293
2294 /* -------------------------------------------------------------------------- */
2295
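/*
 * Allocate 'npages' of zeroed, wired, physically contiguous memory,
 * mapped write-back in kernel VA. Used for structures the CPU accesses
 * by physical address (VMCS, MSR bitmap, MSR lists, VMXON regions).
 */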
2296 static int
2297 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
2298 {
2299 struct pglist pglist;
2300 paddr_t _pa;
2301 vaddr_t _va;
2302 size_t i;
2303 int ret;
2304
2305 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
2306 &pglist, 1, 0);
2307 if (ret != 0)
2308 return ENOMEM;
2309 _pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
2310 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
2311 UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
2312 if (_va == 0)
2313 goto error;
2314
2315 for (i = 0; i < npages; i++) {
2316 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
2317 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
2318 }
2319 pmap_update(pmap_kernel());
2320
2321 memset((void *)_va, 0, npages * PAGE_SIZE);
2322
2323 *pa = _pa;
2324 *va = _va;
2325 return 0;
2326
2327 error:
2328 for (i = 0; i < npages; i++) {
2329 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
2330 }
2331 return ENOMEM;
2332 }
2333
2334 static void
2335 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
2336 {
2337 size_t i;
2338
2339 pmap_kremove(va, npages * PAGE_SIZE);
2340 pmap_update(pmap_kernel());
2341 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
2342 for (i = 0; i < npages; i++) {
2343 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
2344 }
2345 }
2346
2347 /* -------------------------------------------------------------------------- */
2348
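/*
 * MSR bitmap layout (4KB, SDM Vol. 3, "MSR-Bitmap Address"), a clear bit
 * meaning the access is allowed without a #VMEXIT:
 *	bytes    0-1023: reads,  MSRs 0x00000000-0x00001FFF
 *	bytes 1024-2047: reads,  MSRs 0xC0000000-0xC0001FFF
 *	bytes 2048-3071: writes, MSRs 0x00000000-0x00001FFF
 *	bytes 3072-4095: writes, MSRs 0xC0000000-0xC0001FFF
 * For example, MSR_GSBASE (0xC0000101) lands in byte 1024+32 = 1056,
 * bit 1, for reads, and byte 2048+1056 = 3104, bit 1, for writes.
 */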
2349 static void
2350 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2351 {
2352 uint64_t byte;
2353 uint8_t bitoff;
2354
2355 if (msr < 0x00002000) {
2356 /* Range 1 */
2357 byte = ((msr - 0x00000000) / 8) + 0;
2358 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2359 /* Range 2 */
2360 byte = ((msr - 0xC0000000) / 8) + 1024;
2361 } else {
2362 panic("%s: wrong range", __func__);
2363 }
2364
2365 bitoff = (msr & 0x7);
2366
2367 if (read) {
2368 bitmap[byte] &= ~__BIT(bitoff);
2369 }
2370 if (write) {
2371 bitmap[2048 + byte] &= ~__BIT(bitoff);
2372 }
2373 }
2374
2375 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2376 #define VMX_SEG_ATTRIB_S __BIT(4)
2377 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2378 #define VMX_SEG_ATTRIB_P __BIT(7)
2379 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2380 #define VMX_SEG_ATTRIB_L __BIT(13)
2381 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2382 #define VMX_SEG_ATTRIB_G __BIT(15)
2383 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2384
2385 static void
2386 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2387 {
2388 uint64_t attrib;
2389
2390 attrib =
2391 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2392 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2393 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2394 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2395 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2396 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2397 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2398 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2399 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2400
2401 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2402 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2403 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2404 }
2405 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2406 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2407 }
2408
2409 static void
2410 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2411 {
2412 uint64_t selector = 0, attrib = 0, base, limit;
2413
2414 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2415 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2416 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2417 }
2418 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2419 base = vmx_vmread(vmx_guest_segs[idx].base);
2420
2421 segs[idx].selector = selector;
2422 segs[idx].limit = limit;
2423 segs[idx].base = base;
2424 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2425 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2426 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2427 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2428 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2429 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2430 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2431 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2432 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2433 segs[idx].attrib.p = 0;
2434 }
2435 }
2436
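/*
 * Decide whether the state being pushed invalidates the cached guest
 * translations: toggling paging-related bits in CR0/CR4/EFER, or loading
 * a new CR3, requires a VPID flush on the next VMENTRY.
 */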
2437 static inline bool
2438 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2439 {
2440 uint64_t cr0, cr3, cr4, efer;
2441
2442 if (flags & NVMM_X64_STATE_CRS) {
2443 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2444 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2445 return true;
2446 }
2447 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2448 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2449 return true;
2450 }
2451 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2452 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2453 return true;
2454 }
2455 }
2456
2457 if (flags & NVMM_X64_STATE_MSRS) {
2458 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2459 if ((efer ^
2460 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2461 return true;
2462 }
2463 }
2464
2465 return false;
2466 }
2467
2468 static void
2469 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2470 {
2471 struct nvmm_comm_page *comm = vcpu->comm;
2472 const struct nvmm_x64_state *state = &comm->state;
2473 struct vmx_cpudata *cpudata = vcpu->cpudata;
2474 struct fxsave *fpustate;
2475 uint64_t ctls1, intstate;
2476 uint64_t flags;
2477
2478 flags = comm->state_wanted;
2479
2480 vmx_vmcs_enter(vcpu);
2481
2482 if (vmx_state_tlb_flush(state, flags)) {
2483 cpudata->gtlb_want_flush = true;
2484 }
2485
2486 if (flags & NVMM_X64_STATE_SEGS) {
2487 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2488 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2489 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2490 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2491 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2492 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2493 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2494 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2495 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2496 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2497 }
2498
2499 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2500 if (flags & NVMM_X64_STATE_GPRS) {
2501 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2502
2503 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2504 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2505 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2506 }
2507
2508 if (flags & NVMM_X64_STATE_CRS) {
2509 /*
2510 * CR0_NE and CR4_VMXE are mandatory.
2511 */
2512 vmx_vmwrite(VMCS_GUEST_CR0,
2513 state->crs[NVMM_X64_CR_CR0] | CR0_NE);
2514 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2515 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); // XXX PDPTE?
2516 vmx_vmwrite(VMCS_GUEST_CR4,
2517 state->crs[NVMM_X64_CR_CR4] | CR4_VMXE);
2518 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2519
2520 if (vmx_xcr0_mask != 0) {
2521 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2522 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2523 cpudata->gxcr0 &= vmx_xcr0_mask;
2524 cpudata->gxcr0 |= XCR0_X87;
2525 }
2526 }
2527
2528 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2529 if (flags & NVMM_X64_STATE_DRS) {
2530 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2531
2532 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2533 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2534 }
2535
2536 if (flags & NVMM_X64_STATE_MSRS) {
2537 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2538 state->msrs[NVMM_X64_MSR_STAR];
2539 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2540 state->msrs[NVMM_X64_MSR_LSTAR];
2541 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2542 state->msrs[NVMM_X64_MSR_CSTAR];
2543 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2544 state->msrs[NVMM_X64_MSR_SFMASK];
2545 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2546 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2547
2548 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2549 state->msrs[NVMM_X64_MSR_EFER]);
2550 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2551 state->msrs[NVMM_X64_MSR_PAT]);
2552 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2553 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2554 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2555 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2556 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2557 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2558
2559 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
2560 cpudata->gtsc_want_update = true;
2561
2562 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2563 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2564 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2565 ctls1 |= ENTRY_CTLS_LONG_MODE;
2566 } else {
2567 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2568 }
2569 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2570 }
2571
2572 if (flags & NVMM_X64_STATE_INTR) {
2573 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2574 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2575 if (state->intr.int_shadow) {
2576 intstate |= INT_STATE_MOVSS;
2577 }
2578 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2579
2580 if (state->intr.int_window_exiting) {
2581 vmx_event_waitexit_enable(vcpu, false);
2582 } else {
2583 vmx_event_waitexit_disable(vcpu, false);
2584 }
2585
2586 if (state->intr.nmi_window_exiting) {
2587 vmx_event_waitexit_enable(vcpu, true);
2588 } else {
2589 vmx_event_waitexit_disable(vcpu, true);
2590 }
2591 }
2592
2593 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2594 if (flags & NVMM_X64_STATE_FPU) {
2595 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
2596 sizeof(state->fpu));
2597
2598 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
2599 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2600 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2601
2602 if (vmx_xcr0_mask != 0) {
2603 /* Reset XSTATE_BV, to force a reload. */
2604 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2605 }
2606 }
2607
2608 vmx_vmcs_leave(vcpu);
2609
2610 comm->state_wanted = 0;
2611 comm->state_cached |= flags;
2612 }
2613
2614 static void
2615 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2616 {
2617 struct nvmm_comm_page *comm = vcpu->comm;
2618 struct nvmm_x64_state *state = &comm->state;
2619 struct vmx_cpudata *cpudata = vcpu->cpudata;
2620 uint64_t intstate, flags;
2621
2622 flags = comm->state_wanted;
2623
2624 vmx_vmcs_enter(vcpu);
2625
2626 if (flags & NVMM_X64_STATE_SEGS) {
2627 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2628 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2629 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2630 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2631 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2632 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2633 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2634 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2635 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2636 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2637 }
2638
2639 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2640 if (flags & NVMM_X64_STATE_GPRS) {
2641 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2642
2643 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2644 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2645 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2646 }
2647
2648 if (flags & NVMM_X64_STATE_CRS) {
2649 state->crs[NVMM_X64_CR_CR0] = vmx_vmread(VMCS_GUEST_CR0);
2650 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2651 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2652 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2653 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2654 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2655
2656 /* Hide VMXE. */
2657 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2658 }
2659
2660 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2661 if (flags & NVMM_X64_STATE_DRS) {
2662 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2663
2664 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2665 }
2666
2667 if (flags & NVMM_X64_STATE_MSRS) {
2668 state->msrs[NVMM_X64_MSR_STAR] =
2669 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2670 state->msrs[NVMM_X64_MSR_LSTAR] =
2671 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2672 state->msrs[NVMM_X64_MSR_CSTAR] =
2673 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2674 state->msrs[NVMM_X64_MSR_SFMASK] =
2675 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2676 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2677 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2678 state->msrs[NVMM_X64_MSR_EFER] =
2679 vmx_vmread(VMCS_GUEST_IA32_EFER);
2680 state->msrs[NVMM_X64_MSR_PAT] =
2681 vmx_vmread(VMCS_GUEST_IA32_PAT);
2682 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2683 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2684 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2685 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2686 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2687 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2688 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
2689 }
2690
2691 if (flags & NVMM_X64_STATE_INTR) {
2692 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2693 state->intr.int_shadow =
2694 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2695 state->intr.int_window_exiting = cpudata->int_window_exit;
2696 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2697 state->intr.evt_pending = cpudata->evt_pending;
2698 }
2699
2700 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2701 if (flags & NVMM_X64_STATE_FPU) {
2702 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
2703 sizeof(state->fpu));
2704 }
2705
2706 vmx_vmcs_leave(vcpu);
2707
2708 comm->state_wanted = 0;
2709 comm->state_cached |= flags;
2710 }
2711
2712 static void
2713 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2714 {
2715 vcpu->comm->state_wanted = flags;
2716 vmx_vcpu_getstate(vcpu);
2717 }
2718
2719 static void
2720 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2721 {
2722 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2723 vcpu->comm->state_commit = 0;
2724 vmx_vcpu_setstate(vcpu);
2725 }
2726
2727 /* -------------------------------------------------------------------------- */
2728
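/*
 * Allocate a VPID (called "ASID" here) from the global bitmap and program
 * it into the VMCS. VPID 0 belongs to the host and is never handed out.
 */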
2729 static void
2730 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2731 {
2732 struct vmx_cpudata *cpudata = vcpu->cpudata;
2733 size_t i, oct, bit;
2734
2735 mutex_enter(&vmx_asidlock);
2736
2737 for (i = 0; i < vmx_maxasid; i++) {
2738 oct = i / 8;
2739 bit = i % 8;
2740
2741 if (vmx_asidmap[oct] & __BIT(bit)) {
2742 continue;
2743 }
2744
2745 cpudata->asid = i;
2746
2747 vmx_asidmap[oct] |= __BIT(bit);
2748 vmx_vmwrite(VMCS_VPID, i);
2749 mutex_exit(&vmx_asidlock);
2750 return;
2751 }
2752
2753 mutex_exit(&vmx_asidlock);
2754
2755 panic("%s: impossible", __func__);
2756 }
2757
2758 static void
2759 vmx_asid_free(struct nvmm_cpu *vcpu)
2760 {
2761 size_t oct, bit;
2762 uint64_t asid;
2763
2764 asid = vmx_vmread(VMCS_VPID);
2765
2766 oct = asid / 8;
2767 bit = asid % 8;
2768
2769 mutex_enter(&vmx_asidlock);
2770 vmx_asidmap[oct] &= ~__BIT(bit);
2771 mutex_exit(&vmx_asidlock);
2772 }
2773
2774 static void
2775 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2776 {
2777 struct vmx_cpudata *cpudata = vcpu->cpudata;
2778 struct vmcs *vmcs = cpudata->vmcs;
2779 struct msr_entry *gmsr = cpudata->gmsr;
2780 extern uint8_t vmx_resume_rip;
2781 uint64_t rev, eptp;
2782
2783 rev = vmx_get_revision();
2784
2785 memset(vmcs, 0, VMCS_SIZE);
2786 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
2787 vmcs->abort = 0;
2788
2789 vmx_vmcs_enter(vcpu);
2790
2791 /* No link pointer. */
2792 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
2793
2794 /* Install the CTLSs. */
2795 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
2796 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
2797 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
2798 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
2799 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
2800
2801 /* Allow direct access to certain MSRs. */
2802 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2803 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
2804 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2805 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2806 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2807 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2808 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2809 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2810 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2811 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2812 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2813 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2814 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2815 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
2816
2817 	/*
2818 	 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. The
2819 	 * entry-load list additionally holds the L1D_FLUSH MSR, to
2820 	 * mitigate L1TF; it is excluded from the exit-store count.
	 */
2821 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
2822 gmsr[VMX_MSRLIST_STAR].val = 0;
2823 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
2824 gmsr[VMX_MSRLIST_LSTAR].val = 0;
2825 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
2826 gmsr[VMX_MSRLIST_CSTAR].val = 0;
2827 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
2828 gmsr[VMX_MSRLIST_SFMASK].val = 0;
2829 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
2830 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
2831 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
2832 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
2833 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
2834 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
2835 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
2836 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
2837
2838 /* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
2839 vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD|CR0_ET);
2840 vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
2841
2842 /* Force CR4_VMXE to zero. */
2843 vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
2844
2845 /* Set the Host state for resuming. */
2846 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
2847 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
2848 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2849 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2850 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2851 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
2852 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
2853 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
2854 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
2855 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
2856 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
2857 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
2858 vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS);
2859
2860 	/* Allocate an ASID (VPID) for this VCPU. */
2861 vmx_asid_alloc(vcpu);
2862
2863 	/* Enable Extended Page Tables (EPT), 4-level. */
2864 eptp =
2865 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
2866 __SHIFTIN(4-1, EPTP_WALKLEN) |
2867 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
2868 mach->vm->vm_map.pmap->pm_pdirpa[0];
2869 vmx_vmwrite(VMCS_EPTP, eptp);
2870
2871 /* Init IA32_MISC_ENABLE. */
2872 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
2873 cpudata->gmsr_misc_enable &=
2874 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
2875 cpudata->gmsr_misc_enable |=
2876 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
2877
2878 /* Init XSAVE header. */
2879 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2880 cpudata->gfpu.xsh_xcomp_bv = 0;
2881
2882 /* These MSRs are static. */
2883 cpudata->star = rdmsr(MSR_STAR);
2884 cpudata->lstar = rdmsr(MSR_LSTAR);
2885 cpudata->cstar = rdmsr(MSR_CSTAR);
2886 cpudata->sfmask = rdmsr(MSR_SFMASK);
2887
2888 /* Install the RESET state. */
2889 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
2890 sizeof(nvmm_x86_reset_state));
2891 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
2892 vcpu->comm->state_cached = 0;
2893 vmx_vcpu_setstate(vcpu);
2894
2895 vmx_vmcs_leave(vcpu);
2896 }
2897
2898 static int
2899 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2900 {
2901 struct vmx_cpudata *cpudata;
2902 int error;
2903
2904 /* Allocate the VMX cpudata. */
2905 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
2906 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2907 UVM_KMF_WIRED|UVM_KMF_ZERO);
2908 vcpu->cpudata = cpudata;
2909
2910 /* VMCS */
2911 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
2912 VMCS_NPAGES);
2913 if (error)
2914 goto error;
2915
2916 /* MSR Bitmap */
2917 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
2918 MSRBM_NPAGES);
2919 if (error)
2920 goto error;
2921
2922 /* Guest MSR List */
2923 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
2924 if (error)
2925 goto error;
2926
2927 kcpuset_create(&cpudata->htlb_want_flush, true);
2928
2929 /* Init the VCPU info. */
2930 vmx_vcpu_init(mach, vcpu);
2931
2932 return 0;
2933
2934 error:
2935 if (cpudata->vmcs_pa) {
2936 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
2937 VMCS_NPAGES);
2938 }
2939 if (cpudata->msrbm_pa) {
2940 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
2941 MSRBM_NPAGES);
2942 }
2943 if (cpudata->gmsr_pa) {
2944 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2945 }
2946
2947 	/* Must match the uvm_km_alloc of cpudata above. */
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2948 return error;
2949 }
2950
2951 static void
2952 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2953 {
2954 struct vmx_cpudata *cpudata = vcpu->cpudata;
2955
2956 vmx_vmcs_enter(vcpu);
2957 vmx_asid_free(vcpu);
2958 vmx_vmcs_destroy(vcpu);
2959
2960 kcpuset_destroy(cpudata->htlb_want_flush);
2961
2962 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
2963 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
2964 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2965 uvm_km_free(kernel_map, (vaddr_t)cpudata,
2966 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2967 }
2968
2969 /* -------------------------------------------------------------------------- */
2970
2971 static int
2972 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
2973 {
2974 struct nvmm_vcpu_conf_cpuid *cpuid = data;
2975 size_t i;
2976
2977 if (__predict_false(cpuid->mask && cpuid->exit)) {
2978 return EINVAL;
2979 }
2980 if (__predict_false(cpuid->mask &&
2981 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
2982 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
2983 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
2984 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
2985 return EINVAL;
2986 }
2987
2988 /* If unset, delete, to restore the default behavior. */
2989 if (!cpuid->mask && !cpuid->exit) {
2990 for (i = 0; i < VMX_NCPUIDS; i++) {
2991 if (!cpudata->cpuidpresent[i]) {
2992 continue;
2993 }
2994 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
2995 cpudata->cpuidpresent[i] = false;
2996 }
2997 }
2998 return 0;
2999 }
3000
3001 /* If already here, replace. */
3002 for (i = 0; i < VMX_NCPUIDS; i++) {
3003 if (!cpudata->cpuidpresent[i]) {
3004 continue;
3005 }
3006 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3007 memcpy(&cpudata->cpuid[i], cpuid,
3008 sizeof(struct nvmm_vcpu_conf_cpuid));
3009 return 0;
3010 }
3011 }
3012
3013 /* Not here, insert. */
3014 for (i = 0; i < VMX_NCPUIDS; i++) {
3015 if (!cpudata->cpuidpresent[i]) {
3016 cpudata->cpuidpresent[i] = true;
3017 memcpy(&cpudata->cpuid[i], cpuid,
3018 sizeof(struct nvmm_vcpu_conf_cpuid));
3019 return 0;
3020 }
3021 }
3022
3023 return ENOBUFS;
3024 }
3025
3026 static int
3027 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
3028 {
3029 struct nvmm_vcpu_conf_tpr *tpr = data;
3030
3031 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
3032 return 0;
3033 }
3034
3035 static int
3036 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
3037 {
3038 struct vmx_cpudata *cpudata = vcpu->cpudata;
3039
3040 switch (op) {
3041 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
3042 return vmx_vcpu_configure_cpuid(cpudata, data);
3043 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
3044 return vmx_vcpu_configure_tpr(cpudata, data);
3045 default:
3046 return EINVAL;
3047 }
3048 }
3049
3050 /* -------------------------------------------------------------------------- */
3051
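/*
 * pmap hook, invoked when the EPT tree changes. Bump the machine-wide
 * hTLB generation so each VCPU performs INVEPT on its next VMENTRY, and
 * send IPIs to kick the VCPUs out of guest mode.
 */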
3052 static void
3053 vmx_tlb_flush(struct pmap *pm)
3054 {
3055 struct nvmm_machine *mach = pm->pm_data;
3056 struct vmx_machdata *machdata = mach->machdata;
3057
3058 atomic_inc_64(&machdata->mach_htlb_gen);
3059
3060 /* Generates IPIs, which cause #VMEXITs. */
3061 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
3062 }
3063
3064 static void
3065 vmx_machine_create(struct nvmm_machine *mach)
3066 {
3067 struct pmap *pmap = mach->vm->vm_map.pmap;
3068 struct vmx_machdata *machdata;
3069
3070 /* Convert to EPT. */
3071 pmap_ept_transform(pmap);
3072
3073 /* Fill in pmap info. */
3074 pmap->pm_data = (void *)mach;
3075 pmap->pm_tlb_flush = vmx_tlb_flush;
3076
3077 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
3078 mach->machdata = machdata;
3079
3080 /* Start with an hTLB flush everywhere. */
3081 machdata->mach_htlb_gen = 1;
3082 }
3083
3084 static void
3085 vmx_machine_destroy(struct nvmm_machine *mach)
3086 {
3087 struct vmx_machdata *machdata = mach->machdata;
3088
3089 kmem_free(machdata, sizeof(struct vmx_machdata));
3090 }
3091
3092 static int
3093 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
3094 {
3095 panic("%s: impossible", __func__);
3096 }
3097
3098 /* -------------------------------------------------------------------------- */
3099
3100 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
3101 	(((msrval) & __BIT(32 + (bitoff))) != 0)
3102 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
3103 	(((msrval) & __BIT(bitoff)) == 0)
3104
3105 static int
3106 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
3107 {
3108 uint64_t basic, val, true_val;
3109 bool has_true;
3110 size_t i;
3111
3112 basic = rdmsr(MSR_IA32_VMX_BASIC);
3113 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3114
3115 val = rdmsr(msr_ctls);
3116 if (has_true) {
3117 true_val = rdmsr(msr_true_ctls);
3118 } else {
3119 true_val = val;
3120 }
3121
3122 for (i = 0; i < 32; i++) {
3123 if (!(set_one & __BIT(i))) {
3124 continue;
3125 }
3126 if (!CTLS_ONE_ALLOWED(true_val, i)) {
3127 return -1;
3128 }
3129 }
3130
3131 return 0;
3132 }
3133
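/*
 * The VMX capability MSRs encode, for each control field: bits 31:0 are
 * the "allowed-0" settings (a bit set there must be set in the control)
 * and bits 63:32 the "allowed-1" settings (a bit clear there must be
 * clear). When IA32_VMX_BASIC bit 55 is set, the TRUE variants apply
 * instead and relax some of the default requirements.
 */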
3134 static int
3135 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
3136 uint64_t set_one, uint64_t set_zero, uint64_t *res)
3137 {
3138 uint64_t basic, val, true_val;
3139 bool one_allowed, zero_allowed, has_true;
3140 size_t i;
3141
3142 basic = rdmsr(MSR_IA32_VMX_BASIC);
3143 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3144
3145 val = rdmsr(msr_ctls);
3146 if (has_true) {
3147 true_val = rdmsr(msr_true_ctls);
3148 } else {
3149 true_val = val;
3150 }
3151
3152 for (i = 0; i < 32; i++) {
3153 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3154 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3155
3156 if (zero_allowed && !one_allowed) {
3157 if (set_one & __BIT(i))
3158 return -1;
3159 *res &= ~__BIT(i);
3160 } else if (one_allowed && !zero_allowed) {
3161 if (set_zero & __BIT(i))
3162 return -1;
3163 *res |= __BIT(i);
3164 } else {
3165 if (set_zero & __BIT(i)) {
3166 *res &= ~__BIT(i);
3167 } else if (set_one & __BIT(i)) {
3168 *res |= __BIT(i);
3169 } else if (!has_true) {
3170 *res &= ~__BIT(i);
3171 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3172 *res &= ~__BIT(i);
3173 } else if (CTLS_ONE_ALLOWED(val, i)) {
3174 *res |= __BIT(i);
3175 } else {
3176 return -1;
3177 }
3178 }
3179 }
3180
3181 return 0;
3182 }
3183
3184 static bool
3185 vmx_ident(void)
3186 {
3187 uint64_t msr;
3188 int ret;
3189
3190 if (!(cpu_feature[1] & CPUID2_VMX)) {
3191 return false;
3192 }
3193
3194 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3195 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3196 printf("NVMM: VMX disabled in BIOS\n");
3197 return false;
3198 }
3199 	if ((msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3200 		printf("NVMM: VMX not enabled outside SMX in BIOS\n");
3201 		return false;
3202 	}
3203
3204 msr = rdmsr(MSR_IA32_VMX_BASIC);
3205 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3206 printf("NVMM: I/O reporting not supported\n");
3207 return false;
3208 }
3209 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3210 printf("NVMM: WB memory not supported\n");
3211 return false;
3212 }
3213
3214 /* PG and PE are reported, even if Unrestricted Guests is supported. */
3215 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3216 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3217 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3218 if (ret == -1) {
3219 printf("NVMM: CR0 requirements not satisfied\n");
3220 return false;
3221 }
3222
3223 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3224 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3225 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
3226 if (ret == -1) {
3227 printf("NVMM: CR4 requirements not satisfied\n");
3228 return false;
3229 }
3230
3231 /* Init the CTLSs right now, and check for errors. */
3232 ret = vmx_init_ctls(
3233 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3234 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3235 &vmx_pinbased_ctls);
3236 if (ret == -1) {
3237 printf("NVMM: pin-based-ctls requirements not satisfied\n");
3238 return false;
3239 }
3240 ret = vmx_init_ctls(
3241 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3242 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3243 &vmx_procbased_ctls);
3244 if (ret == -1) {
3245 printf("NVMM: proc-based-ctls requirements not satisfied\n");
3246 return false;
3247 }
3248 ret = vmx_init_ctls(
3249 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3250 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3251 &vmx_procbased_ctls2);
3252 if (ret == -1) {
3253 printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
3254 return false;
3255 }
3256 ret = vmx_check_ctls(
3257 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3258 PROC_CTLS2_INVPCID_ENABLE);
3259 if (ret != -1) {
3260 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3261 }
3262 ret = vmx_init_ctls(
3263 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3264 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3265 &vmx_entry_ctls);
3266 if (ret == -1) {
3267 printf("NVMM: entry-ctls requirements not satisfied\n");
3268 return false;
3269 }
3270 ret = vmx_init_ctls(
3271 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3272 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3273 &vmx_exit_ctls);
3274 if (ret == -1) {
3275 printf("NVMM: exit-ctls requirements not satisfied\n");
3276 return false;
3277 }
3278
3279 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3280 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3281 printf("NVMM: 4-level page tree not supported\n");
3282 return false;
3283 }
3284 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3285 printf("NVMM: INVEPT not supported\n");
3286 return false;
3287 }
3288 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3289 printf("NVMM: INVVPID not supported\n");
3290 return false;
3291 }
3292 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3293 pmap_ept_has_ad = true;
3294 } else {
3295 pmap_ept_has_ad = false;
3296 }
3297 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3298 printf("NVMM: EPT UC/WB memory types not supported\n");
3299 return false;
3300 }
3301
3302 return true;
3303 }
3304
3305 static void
3306 vmx_init_asid(uint32_t maxasid)
3307 {
3308 size_t allocsz;
3309
3310 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
3311
3312 vmx_maxasid = maxasid;
3313 allocsz = roundup(maxasid, 8) / 8;
3314 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
3315
3316 /* ASID 0 is reserved for the host. */
3317 vmx_asidmap[0] |= __BIT(0);
3318 }
3319
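/*
 * Cross-call target: enter VMX root operation on the local CPU (set
 * CR4_VMXE, then VMXON), or leave it (VMXOFF, then clear CR4_VMXE).
 */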
3320 static void
3321 vmx_change_cpu(void *arg1, void *arg2)
3322 {
3323 struct cpu_info *ci = curcpu();
3324 bool enable = arg1 != NULL;
3325 uint64_t cr4;
3326
3327 if (!enable) {
3328 vmx_vmxoff();
3329 }
3330
3331 cr4 = rcr4();
3332 if (enable) {
3333 cr4 |= CR4_VMXE;
3334 } else {
3335 cr4 &= ~CR4_VMXE;
3336 }
3337 lcr4(cr4);
3338
3339 if (enable) {
3340 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
3341 }
3342 }
3343
3344 static void
3345 vmx_init_l1tf(void)
3346 {
3347 u_int descs[4];
3348 uint64_t msr;
3349
3350 if (cpuid_level < 7) {
3351 return;
3352 }
3353
3354 x86_cpuid(7, descs);
3355
3356 if (descs[3] & CPUID_SEF_ARCH_CAP) {
3357 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3358 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3359 /* No mitigation needed. */
3360 return;
3361 }
3362 }
3363
3364 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
3365 /* Enable hardware mitigation. */
3366 vmx_msrlist_entry_nmsr += 1;
3367 }
3368 }
3369
3370 static void
3371 vmx_init(void)
3372 {
3373 CPU_INFO_ITERATOR cii;
3374 struct cpu_info *ci;
3375 uint64_t xc, msr;
3376 struct vmxon *vmxon;
3377 uint32_t revision;
3378 u_int descs[4];
3379 paddr_t pa;
3380 vaddr_t va;
3381 int error;
3382
3383 /* Init the ASID bitmap (VPID). */
3384 vmx_init_asid(VPID_MAX);
3385
3386 /* Init the XCR0 mask. */
3387 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3388
3389 /* Init the max basic CPUID leaf. */
3390 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC);
3391
3392 /* Init the max extended CPUID leaf. */
3393 x86_cpuid(0x80000000, descs);
3394 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED);
3395
3396 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3397 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3398 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3399 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3400 } else {
3401 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3402 }
3403 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3404 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3405 } else {
3406 vmx_ept_flush_op = VMX_INVEPT_ALL;
3407 }
3408 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3409 vmx_eptp_type = EPTP_TYPE_WB;
3410 } else {
3411 vmx_eptp_type = EPTP_TYPE_UC;
3412 }
3413
3414 /* Init the L1TF mitigation. */
3415 vmx_init_l1tf();
3416
3417 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3418 revision = vmx_get_revision();
3419
3420 for (CPU_INFO_FOREACH(cii, ci)) {
3421 error = vmx_memalloc(&pa, &va, 1);
3422 if (error) {
3423 panic("%s: out of memory", __func__);
3424 }
3425 vmxoncpu[cpu_index(ci)].pa = pa;
3426 vmxoncpu[cpu_index(ci)].va = va;
3427
3428 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
3429 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3430 }
3431
3432 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
3433 xc_wait(xc);
3434 }
3435
3436 static void
3437 vmx_fini_asid(void)
3438 {
3439 size_t allocsz;
3440
3441 allocsz = roundup(vmx_maxasid, 8) / 8;
3442 kmem_free(vmx_asidmap, allocsz);
3443
3444 mutex_destroy(&vmx_asidlock);
3445 }
3446
3447 static void
3448 vmx_fini(void)
3449 {
3450 uint64_t xc;
3451 size_t i;
3452
3453 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
3454 xc_wait(xc);
3455
3456 for (i = 0; i < MAXCPUS; i++) {
3457 if (vmxoncpu[i].pa != 0)
3458 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3459 }
3460
3461 vmx_fini_asid();
3462 }
3463
3464 static void
3465 vmx_capability(struct nvmm_capability *cap)
3466 {
3467 cap->arch.mach_conf_support = 0;
3468 cap->arch.vcpu_conf_support =
3469 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3470 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3471 cap->arch.xcr0_mask = vmx_xcr0_mask;
3472 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3473 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3474 }
3475
3476 const struct nvmm_impl nvmm_x86_vmx = {
3477 .name = "x86-vmx",
3478 .ident = vmx_ident,
3479 .init = vmx_init,
3480 .fini = vmx_fini,
3481 .capability = vmx_capability,
3482 .mach_conf_max = NVMM_X86_MACH_NCONF,
3483 .mach_conf_sizes = NULL,
3484 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3485 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3486 .state_size = sizeof(struct nvmm_x64_state),
3487 .machine_create = vmx_machine_create,
3488 .machine_destroy = vmx_machine_destroy,
3489 .machine_configure = vmx_machine_configure,
3490 .vcpu_create = vmx_vcpu_create,
3491 .vcpu_destroy = vmx_vcpu_destroy,
3492 .vcpu_configure = vmx_vcpu_configure,
3493 .vcpu_setstate = vmx_vcpu_setstate,
3494 .vcpu_getstate = vmx_vcpu_getstate,
3495 .vcpu_inject = vmx_vcpu_inject,
3496 .vcpu_run = vmx_vcpu_run
3497 };
3498