/* $NetBSD: nvmm_x86_vmx.c,v 1.36.2.6 2020/05/13 12:21:56 martin Exp $ */

/*
 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.36.2.6 2020/05/13 12:21:56 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <sys/bitops.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
        if (__predict_false(_vmx_vmxon(a) != 0)) { \
                panic("%s: VMXON failed", __func__); \
        }
#define vmx_vmxoff() \
        if (__predict_false(_vmx_vmxoff() != 0)) { \
                panic("%s: VMXOFF failed", __func__); \
        }

struct ept_desc {
        uint64_t eptp;
        uint64_t mbz;
} __packed;

struct vpid_desc {
        uint64_t vpid;
        uint64_t addr;
} __packed;

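/*
 * Wrappers around the VMX instructions. On failure these instructions set
 * ZF (VMfailValid) or CF (VMfailInvalid); the stubs branch to the
 * vmx_insn_failvalid/vmx_insn_failinvalid panic labels, which are presumably
 * provided by the assembly part of this module (not shown here).
 */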
static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
        asm volatile (
                "invept %[desc],%[op];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                :
                : [desc] "m" (*desc), [op] "r" (op)
                : "memory", "cc"
        );
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
        asm volatile (
                "invvpid %[desc],%[op];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                :
                : [desc] "m" (*desc), [op] "r" (op)
                : "memory", "cc"
        );
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
        uint64_t value;

        asm volatile (
                "vmread %[field],%[value];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                : [value] "=r" (value)
                : [field] "r" (field)
                : "cc"
        );

        return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
        asm volatile (
                "vmwrite %[value],%[field];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                :
                : [field] "r" (field), [value] "r" (value)
                : "cc"
        );
}

static inline paddr_t
vmx_vmptrst(void)
{
        paddr_t pa;

        asm volatile (
                "vmptrst %[pa];"
                :
                : [pa] "m" (*(paddr_t *)&pa)
                : "memory"
        );

        return pa;
}

static inline void
vmx_vmptrld(paddr_t *pa)
{
        asm volatile (
                "vmptrld %[pa];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                :
                : [pa] "m" (*pa)
                : "memory", "cc"
        );
}

static inline void
vmx_vmclear(paddr_t *pa)
{
        asm volatile (
                "vmclear %[pa];"
                "jz vmx_insn_failvalid;"
                "jc vmx_insn_failinvalid;"
                :
                : [pa] "m" (*pa)
                : "memory", "cc"
        );
}

#define MSR_IA32_FEATURE_CONTROL 0x003A
#define IA32_FEATURE_CONTROL_LOCK __BIT(0)
#define IA32_FEATURE_CONTROL_IN_SMX __BIT(1)
#define IA32_FEATURE_CONTROL_OUT_SMX __BIT(2)

#define MSR_IA32_VMX_BASIC 0x0480
#define IA32_VMX_BASIC_IDENT __BITS(30,0)
#define IA32_VMX_BASIC_DATA_SIZE __BITS(44,32)
#define IA32_VMX_BASIC_MEM_WIDTH __BIT(48)
#define IA32_VMX_BASIC_DUAL __BIT(49)
#define IA32_VMX_BASIC_MEM_TYPE __BITS(53,50)
#define MEM_TYPE_UC 0
#define MEM_TYPE_WB 6
#define IA32_VMX_BASIC_IO_REPORT __BIT(54)
#define IA32_VMX_BASIC_TRUE_CTLS __BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS 0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x0482
#define MSR_IA32_VMX_EXIT_CTLS 0x0483
#define MSR_IA32_VMX_ENTRY_CTLS 0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x0490

#define MSR_IA32_VMX_CR0_FIXED0 0x0486
#define MSR_IA32_VMX_CR0_FIXED1 0x0487
#define MSR_IA32_VMX_CR4_FIXED0 0x0488
#define MSR_IA32_VMX_CR4_FIXED1 0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP 0x048C
#define IA32_VMX_EPT_VPID_WALKLENGTH_4 __BIT(6)
#define IA32_VMX_EPT_VPID_UC __BIT(8)
#define IA32_VMX_EPT_VPID_WB __BIT(14)
#define IA32_VMX_EPT_VPID_INVEPT __BIT(20)
#define IA32_VMX_EPT_VPID_FLAGS_AD __BIT(21)
#define IA32_VMX_EPT_VPID_INVEPT_CONTEXT __BIT(25)
#define IA32_VMX_EPT_VPID_INVEPT_ALL __BIT(26)
#define IA32_VMX_EPT_VPID_INVVPID __BIT(32)
#define IA32_VMX_EPT_VPID_INVVPID_ADDR __BIT(40)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT __BIT(41)
#define IA32_VMX_EPT_VPID_INVVPID_ALL __BIT(42)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG __BIT(43)

/* -------------------------------------------------------------------------- */

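/*
 * VMCS field encodings, following the layout given in the Intel SDM:
 * bits 14:13 of the encoding give the access width (16-bit, 64-bit,
 * 32-bit, natural) and bits 11:10 the field type (control, read-only
 * data, guest-state, host-state).
 */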
/* 16-bit control fields */
#define VMCS_VPID 0x00000000
#define VMCS_PIR_VECTOR 0x00000002
#define VMCS_EPTP_INDEX 0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR 0x00000800
#define VMCS_GUEST_CS_SELECTOR 0x00000802
#define VMCS_GUEST_SS_SELECTOR 0x00000804
#define VMCS_GUEST_DS_SELECTOR 0x00000806
#define VMCS_GUEST_FS_SELECTOR 0x00000808
#define VMCS_GUEST_GS_SELECTOR 0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
#define VMCS_GUEST_TR_SELECTOR 0x0000080E
#define VMCS_GUEST_INTR_STATUS 0x00000810
#define VMCS_PML_INDEX 0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR 0x00000C00
#define VMCS_HOST_CS_SELECTOR 0x00000C02
#define VMCS_HOST_SS_SELECTOR 0x00000C04
#define VMCS_HOST_DS_SELECTOR 0x00000C06
#define VMCS_HOST_FS_SELECTOR 0x00000C08
#define VMCS_HOST_GS_SELECTOR 0x00000C0A
#define VMCS_HOST_TR_SELECTOR 0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A 0x00002000
#define VMCS_IO_BITMAP_B 0x00002002
#define VMCS_MSR_BITMAP 0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS 0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS 0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS 0x0000200A
#define VMCS_EXECUTIVE_VMCS 0x0000200C
#define VMCS_PML_ADDRESS 0x0000200E
#define VMCS_TSC_OFFSET 0x00002010
#define VMCS_VIRTUAL_APIC 0x00002012
#define VMCS_APIC_ACCESS 0x00002014
#define VMCS_PIR_DESC 0x00002016
#define VMCS_VM_CONTROL 0x00002018
#define VMCS_EPTP 0x0000201A
#define EPTP_TYPE __BITS(2,0)
#define EPTP_TYPE_UC 0
#define EPTP_TYPE_WB 6
#define EPTP_WALKLEN __BITS(5,3)
#define EPTP_FLAGS_AD __BIT(6)
#define EPTP_PHYSADDR __BITS(63,12)
#define VMCS_EOI_EXIT0 0x0000201C
#define VMCS_EOI_EXIT1 0x0000201E
#define VMCS_EOI_EXIT2 0x00002020
#define VMCS_EOI_EXIT3 0x00002022
#define VMCS_EPTP_LIST 0x00002024
#define VMCS_VMREAD_BITMAP 0x00002026
#define VMCS_VMWRITE_BITMAP 0x00002028
#define VMCS_VIRTUAL_EXCEPTION 0x0000202A
#define VMCS_XSS_EXIT_BITMAP 0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP 0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR 0x00002030
#define VMCS_TSC_MULTIPLIER 0x00002032
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER 0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
#define VMCS_GUEST_IA32_PAT 0x00002804
#define VMCS_GUEST_IA32_EFER 0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
#define VMCS_GUEST_PDPTE0 0x0000280A
#define VMCS_GUEST_PDPTE1 0x0000280C
#define VMCS_GUEST_PDPTE2 0x0000280E
#define VMCS_GUEST_PDPTE3 0x00002810
#define VMCS_GUEST_BNDCFGS 0x00002812
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT 0x00002C00
#define VMCS_HOST_IA32_EFER 0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS 0x00004000
#define PIN_CTLS_INT_EXITING __BIT(0)
#define PIN_CTLS_NMI_EXITING __BIT(3)
#define PIN_CTLS_VIRTUAL_NMIS __BIT(5)
#define PIN_CTLS_ACTIVATE_PREEMPT_TIMER __BIT(6)
#define PIN_CTLS_PROCESS_POSTED_INTS __BIT(7)
#define VMCS_PROCBASED_CTLS 0x00004002
#define PROC_CTLS_INT_WINDOW_EXITING __BIT(2)
#define PROC_CTLS_USE_TSC_OFFSETTING __BIT(3)
#define PROC_CTLS_HLT_EXITING __BIT(7)
#define PROC_CTLS_INVLPG_EXITING __BIT(9)
#define PROC_CTLS_MWAIT_EXITING __BIT(10)
#define PROC_CTLS_RDPMC_EXITING __BIT(11)
#define PROC_CTLS_RDTSC_EXITING __BIT(12)
#define PROC_CTLS_RCR3_EXITING __BIT(15)
#define PROC_CTLS_LCR3_EXITING __BIT(16)
#define PROC_CTLS_RCR8_EXITING __BIT(19)
#define PROC_CTLS_LCR8_EXITING __BIT(20)
#define PROC_CTLS_USE_TPR_SHADOW __BIT(21)
#define PROC_CTLS_NMI_WINDOW_EXITING __BIT(22)
#define PROC_CTLS_DR_EXITING __BIT(23)
#define PROC_CTLS_UNCOND_IO_EXITING __BIT(24)
#define PROC_CTLS_USE_IO_BITMAPS __BIT(25)
#define PROC_CTLS_MONITOR_TRAP_FLAG __BIT(27)
#define PROC_CTLS_USE_MSR_BITMAPS __BIT(28)
#define PROC_CTLS_MONITOR_EXITING __BIT(29)
#define PROC_CTLS_PAUSE_EXITING __BIT(30)
#define PROC_CTLS_ACTIVATE_CTLS2 __BIT(31)
#define VMCS_EXCEPTION_BITMAP 0x00004004
#define VMCS_PF_ERROR_MASK 0x00004006
#define VMCS_PF_ERROR_MATCH 0x00004008
#define VMCS_CR3_TARGET_COUNT 0x0000400A
#define VMCS_EXIT_CTLS 0x0000400C
#define EXIT_CTLS_SAVE_DEBUG_CONTROLS __BIT(2)
#define EXIT_CTLS_HOST_LONG_MODE __BIT(9)
#define EXIT_CTLS_LOAD_PERFGLOBALCTRL __BIT(12)
#define EXIT_CTLS_ACK_INTERRUPT __BIT(15)
#define EXIT_CTLS_SAVE_PAT __BIT(18)
#define EXIT_CTLS_LOAD_PAT __BIT(19)
#define EXIT_CTLS_SAVE_EFER __BIT(20)
#define EXIT_CTLS_LOAD_EFER __BIT(21)
#define EXIT_CTLS_SAVE_PREEMPT_TIMER __BIT(22)
#define EXIT_CTLS_CLEAR_BNDCFGS __BIT(23)
#define EXIT_CTLS_CONCEAL_PT __BIT(24)
#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
#define VMCS_ENTRY_CTLS 0x00004012
#define ENTRY_CTLS_LOAD_DEBUG_CONTROLS __BIT(2)
#define ENTRY_CTLS_LONG_MODE __BIT(9)
#define ENTRY_CTLS_SMM __BIT(10)
#define ENTRY_CTLS_DISABLE_DUAL __BIT(11)
#define ENTRY_CTLS_LOAD_PERFGLOBALCTRL __BIT(13)
#define ENTRY_CTLS_LOAD_PAT __BIT(14)
#define ENTRY_CTLS_LOAD_EFER __BIT(15)
#define ENTRY_CTLS_LOAD_BNDCFGS __BIT(16)
#define ENTRY_CTLS_CONCEAL_PT __BIT(17)
#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
#define VMCS_ENTRY_INTR_INFO 0x00004016
#define INTR_INFO_VECTOR __BITS(7,0)
#define INTR_INFO_TYPE __BITS(10,8)
#define INTR_TYPE_EXT_INT 0
#define INTR_TYPE_NMI 2
#define INTR_TYPE_HW_EXC 3
#define INTR_TYPE_SW_INT 4
#define INTR_TYPE_PRIV_SW_EXC 5
#define INTR_TYPE_SW_EXC 6
#define INTR_TYPE_OTHER 7
#define INTR_INFO_ERROR __BIT(11)
#define INTR_INFO_VALID __BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH 0x0000401A
#define VMCS_TPR_THRESHOLD 0x0000401C
#define VMCS_PROCBASED_CTLS2 0x0000401E
#define PROC_CTLS2_VIRT_APIC_ACCESSES __BIT(0)
#define PROC_CTLS2_ENABLE_EPT __BIT(1)
#define PROC_CTLS2_DESC_TABLE_EXITING __BIT(2)
#define PROC_CTLS2_ENABLE_RDTSCP __BIT(3)
#define PROC_CTLS2_VIRT_X2APIC __BIT(4)
#define PROC_CTLS2_ENABLE_VPID __BIT(5)
#define PROC_CTLS2_WBINVD_EXITING __BIT(6)
#define PROC_CTLS2_UNRESTRICTED_GUEST __BIT(7)
#define PROC_CTLS2_APIC_REG_VIRT __BIT(8)
#define PROC_CTLS2_VIRT_INT_DELIVERY __BIT(9)
#define PROC_CTLS2_PAUSE_LOOP_EXITING __BIT(10)
#define PROC_CTLS2_RDRAND_EXITING __BIT(11)
#define PROC_CTLS2_INVPCID_ENABLE __BIT(12)
#define PROC_CTLS2_VMFUNC_ENABLE __BIT(13)
#define PROC_CTLS2_VMCS_SHADOWING __BIT(14)
#define PROC_CTLS2_ENCLS_EXITING __BIT(15)
#define PROC_CTLS2_RDSEED_EXITING __BIT(16)
#define PROC_CTLS2_PML_ENABLE __BIT(17)
#define PROC_CTLS2_EPT_VIOLATION __BIT(18)
#define PROC_CTLS2_CONCEAL_VMX_FROM_PT __BIT(19)
#define PROC_CTLS2_XSAVES_ENABLE __BIT(20)
#define PROC_CTLS2_MODE_BASED_EXEC_EPT __BIT(22)
#define PROC_CTLS2_SUBPAGE_PERMISSIONS __BIT(23)
#define PROC_CTLS2_USE_TSC_SCALING __BIT(25)
#define PROC_CTLS2_ENCLV_EXITING __BIT(28)
#define VMCS_PLE_GAP 0x00004020
#define VMCS_PLE_WINDOW 0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR 0x00004400
#define VMCS_EXIT_REASON 0x00004402
#define VMCS_EXIT_INTR_INFO 0x00004404
#define VMCS_EXIT_INTR_ERRCODE 0x00004406
#define VMCS_IDT_VECTORING_INFO 0x00004408
#define VMCS_IDT_VECTORING_ERROR 0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT 0x00004800
#define VMCS_GUEST_CS_LIMIT 0x00004802
#define VMCS_GUEST_SS_LIMIT 0x00004804
#define VMCS_GUEST_DS_LIMIT 0x00004806
#define VMCS_GUEST_FS_LIMIT 0x00004808
#define VMCS_GUEST_GS_LIMIT 0x0000480A
#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
#define VMCS_GUEST_TR_LIMIT 0x0000480E
#define VMCS_GUEST_GDTR_LIMIT 0x00004810
#define VMCS_GUEST_IDTR_LIMIT 0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
#define INT_STATE_STI __BIT(0)
#define INT_STATE_MOVSS __BIT(1)
#define INT_STATE_SMI __BIT(2)
#define INT_STATE_NMI __BIT(3)
#define INT_STATE_ENCLAVE __BIT(4)
#define VMCS_GUEST_ACTIVITY 0x00004826
#define VMCS_GUEST_SMBASE 0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK 0x00006000
#define VMCS_CR4_MASK 0x00006002
#define VMCS_CR0_SHADOW 0x00006004
#define VMCS_CR4_SHADOW 0x00006006
#define VMCS_CR3_TARGET0 0x00006008
#define VMCS_CR3_TARGET1 0x0000600A
#define VMCS_CR3_TARGET2 0x0000600C
#define VMCS_CR3_TARGET3 0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION 0x00006400
#define VMCS_IO_RCX 0x00006402
#define VMCS_IO_RSI 0x00006404
#define VMCS_IO_RDI 0x00006406
#define VMCS_IO_RIP 0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0 0x00006800
#define VMCS_GUEST_CR3 0x00006802
#define VMCS_GUEST_CR4 0x00006804
#define VMCS_GUEST_ES_BASE 0x00006806
#define VMCS_GUEST_CS_BASE 0x00006808
#define VMCS_GUEST_SS_BASE 0x0000680A
#define VMCS_GUEST_DS_BASE 0x0000680C
#define VMCS_GUEST_FS_BASE 0x0000680E
#define VMCS_GUEST_GS_BASE 0x00006810
#define VMCS_GUEST_LDTR_BASE 0x00006812
#define VMCS_GUEST_TR_BASE 0x00006814
#define VMCS_GUEST_GDTR_BASE 0x00006816
#define VMCS_GUEST_IDTR_BASE 0x00006818
#define VMCS_GUEST_DR7 0x0000681A
#define VMCS_GUEST_RSP 0x0000681C
#define VMCS_GUEST_RIP 0x0000681E
#define VMCS_GUEST_RFLAGS 0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0 0x00006C00
#define VMCS_HOST_CR3 0x00006C02
#define VMCS_HOST_CR4 0x00006C04
#define VMCS_HOST_FS_BASE 0x00006C06
#define VMCS_HOST_GS_BASE 0x00006C08
#define VMCS_HOST_TR_BASE 0x00006C0A
#define VMCS_HOST_GDTR_BASE 0x00006C0C
#define VMCS_HOST_IDTR_BASE 0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
#define VMCS_HOST_RSP 0x00006C14
#define VMCS_HOST_RIP 0x00006C16

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI 0
#define VMCS_EXITCODE_EXT_INT 1
#define VMCS_EXITCODE_SHUTDOWN 2
#define VMCS_EXITCODE_INIT 3
#define VMCS_EXITCODE_SIPI 4
#define VMCS_EXITCODE_SMI 5
#define VMCS_EXITCODE_OTHER_SMI 6
#define VMCS_EXITCODE_INT_WINDOW 7
#define VMCS_EXITCODE_NMI_WINDOW 8
#define VMCS_EXITCODE_TASK_SWITCH 9
#define VMCS_EXITCODE_CPUID 10
#define VMCS_EXITCODE_GETSEC 11
#define VMCS_EXITCODE_HLT 12
#define VMCS_EXITCODE_INVD 13
#define VMCS_EXITCODE_INVLPG 14
#define VMCS_EXITCODE_RDPMC 15
#define VMCS_EXITCODE_RDTSC 16
#define VMCS_EXITCODE_RSM 17
#define VMCS_EXITCODE_VMCALL 18
#define VMCS_EXITCODE_VMCLEAR 19
#define VMCS_EXITCODE_VMLAUNCH 20
#define VMCS_EXITCODE_VMPTRLD 21
#define VMCS_EXITCODE_VMPTRST 22
#define VMCS_EXITCODE_VMREAD 23
#define VMCS_EXITCODE_VMRESUME 24
#define VMCS_EXITCODE_VMWRITE 25
#define VMCS_EXITCODE_VMXOFF 26
#define VMCS_EXITCODE_VMXON 27
#define VMCS_EXITCODE_CR 28
#define VMCS_EXITCODE_DR 29
#define VMCS_EXITCODE_IO 30
#define VMCS_EXITCODE_RDMSR 31
#define VMCS_EXITCODE_WRMSR 32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID 33
#define VMCS_EXITCODE_FAIL_MSR_INVALID 34
#define VMCS_EXITCODE_MWAIT 36
#define VMCS_EXITCODE_TRAP_FLAG 37
#define VMCS_EXITCODE_MONITOR 39
#define VMCS_EXITCODE_PAUSE 40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK 41
#define VMCS_EXITCODE_TPR_BELOW 43
#define VMCS_EXITCODE_APIC_ACCESS 44
#define VMCS_EXITCODE_VEOI 45
#define VMCS_EXITCODE_GDTR_IDTR 46
#define VMCS_EXITCODE_LDTR_TR 47
#define VMCS_EXITCODE_EPT_VIOLATION 48
#define VMCS_EXITCODE_EPT_MISCONFIG 49
#define VMCS_EXITCODE_INVEPT 50
#define VMCS_EXITCODE_RDTSCP 51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT 52
#define VMCS_EXITCODE_INVVPID 53
#define VMCS_EXITCODE_WBINVD 54
#define VMCS_EXITCODE_XSETBV 55
#define VMCS_EXITCODE_APIC_WRITE 56
#define VMCS_EXITCODE_RDRAND 57
#define VMCS_EXITCODE_INVPCID 58
#define VMCS_EXITCODE_VMFUNC 59
#define VMCS_EXITCODE_ENCLS 60
#define VMCS_EXITCODE_RDSEED 61
#define VMCS_EXITCODE_PAGE_LOG_FULL 62
#define VMCS_EXITCODE_XSAVES 63
#define VMCS_EXITCODE_XRSTORS 64

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR 0
#define VMX_MSRLIST_LSTAR 1
#define VMX_MSRLIST_CSTAR 2
#define VMX_MSRLIST_SFMASK 3
#define VMX_MSRLIST_KERNELGSBASE 4
#define VMX_MSRLIST_EXIT_NMSR 5
#define VMX_MSRLIST_L1DFLUSH 5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
        uint32_t ident;
#define VMXON_IDENT_REVISION __BITS(30,0)

        uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
        vaddr_t va;
        paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
        uint32_t ident;
#define VMCS_IDENT_REVISION __BITS(30,0)
#define VMCS_IDENT_SHADOW __BIT(31)

        uint32_t abort;
        uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
        uint32_t msr;
        uint32_t rsvd;
        uint64_t val;
} __packed;

#define VPID_MAX 0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

#define VMX_PINBASED_CTLS_ONE \
        (PIN_CTLS_INT_EXITING| \
        PIN_CTLS_NMI_EXITING| \
        PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO 0

#define VMX_PROCBASED_CTLS_ONE \
        (PROC_CTLS_USE_TSC_OFFSETTING| \
        PROC_CTLS_HLT_EXITING| \
        PROC_CTLS_MWAIT_EXITING | \
        PROC_CTLS_RDPMC_EXITING | \
        PROC_CTLS_RCR8_EXITING | \
        PROC_CTLS_LCR8_EXITING | \
        PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
        PROC_CTLS_USE_MSR_BITMAPS | \
        PROC_CTLS_MONITOR_EXITING | \
        PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO \
        (PROC_CTLS_RCR3_EXITING| \
        PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE \
        (PROC_CTLS2_ENABLE_EPT| \
        PROC_CTLS2_ENABLE_VPID| \
        PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO 0

#define VMX_ENTRY_CTLS_ONE \
        (ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
        ENTRY_CTLS_LOAD_EFER| \
        ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO \
        (ENTRY_CTLS_SMM| \
        ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE \
        (EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
        EXIT_CTLS_HOST_LONG_MODE| \
        EXIT_CTLS_SAVE_PAT| \
        EXIT_CTLS_LOAD_PAT| \
        EXIT_CTLS_SAVE_EFER| \
        EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO 0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT (XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS 32

#define VMCS_NPAGES 1
#define VMCS_SIZE (VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES 1
#define MSRBM_SIZE (MSRBM_NPAGES * PAGE_SIZE)

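/*
 * Bits of EFER/CR0/CR4 that affect address translation (paging mode,
 * global pages, caching). A guest write that toggles any of these is
 * taken to require a guest TLB flush.
 */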
#define EFER_TLB_FLUSH \
        (EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
        (CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
        (CR4_PGE|CR4_PAE|CR4_PSE)

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
        volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
        [NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
            sizeof(struct nvmm_vcpu_conf_cpuid),
        [NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
            sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
        /* General */
        uint64_t asid;
        bool gtlb_want_flush;
        bool gtsc_want_update;
        uint64_t vcpu_htlb_gen;
        kcpuset_t *htlb_want_flush;

        /* VMCS */
        struct vmcs *vmcs;
        paddr_t vmcs_pa;
        size_t vmcs_refcnt;
        struct cpu_info *vmcs_ci;
        bool vmcs_launched;

        /* MSR bitmap */
        uint8_t *msrbm;
        paddr_t msrbm_pa;

        /* Host state */
        uint64_t hxcr0;
        uint64_t star;
        uint64_t lstar;
        uint64_t cstar;
        uint64_t sfmask;
        uint64_t kernelgsbase;
        bool ts_set;
        struct xsave_header hfpu __aligned(64);

        /* Intr state */
        bool int_window_exit;
        bool nmi_window_exit;
        bool evt_pending;

        /* Guest state */
        struct msr_entry *gmsr;
        paddr_t gmsr_pa;
        uint64_t gmsr_misc_enable;
        uint64_t gcr2;
        uint64_t gcr8;
        uint64_t gxcr0;
        uint64_t gprs[NVMM_X64_NGPR];
        uint64_t drs[NVMM_X64_NDR];
        uint64_t gtsc;
        struct xsave_header gfpu __aligned(64);

        /* VCPU configuration. */
        bool cpuidpresent[VMX_NCPUIDS];
        struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
        struct nvmm_vcpu_conf_tpr tpr;
};

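/*
 * Map from NVMM segment indexes to the VMCS fields describing each guest
 * segment, so the state get/set code can operate generically, e.g.
 * (illustrative only):
 *
 *      base = vmx_vmread(vmx_guest_segs[NVMM_X64_SEG_CS].base);
 *
 * GDTR and IDTR have no selector or access-rights fields.
 */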
static const struct {
        uint64_t selector;
        uint64_t attrib;
        uint64_t limit;
        uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
        [NVMM_X64_SEG_ES] = {
                VMCS_GUEST_ES_SELECTOR,
                VMCS_GUEST_ES_ACCESS_RIGHTS,
                VMCS_GUEST_ES_LIMIT,
                VMCS_GUEST_ES_BASE
        },
        [NVMM_X64_SEG_CS] = {
                VMCS_GUEST_CS_SELECTOR,
                VMCS_GUEST_CS_ACCESS_RIGHTS,
                VMCS_GUEST_CS_LIMIT,
                VMCS_GUEST_CS_BASE
        },
        [NVMM_X64_SEG_SS] = {
                VMCS_GUEST_SS_SELECTOR,
                VMCS_GUEST_SS_ACCESS_RIGHTS,
                VMCS_GUEST_SS_LIMIT,
                VMCS_GUEST_SS_BASE
        },
        [NVMM_X64_SEG_DS] = {
                VMCS_GUEST_DS_SELECTOR,
                VMCS_GUEST_DS_ACCESS_RIGHTS,
                VMCS_GUEST_DS_LIMIT,
                VMCS_GUEST_DS_BASE
        },
        [NVMM_X64_SEG_FS] = {
                VMCS_GUEST_FS_SELECTOR,
                VMCS_GUEST_FS_ACCESS_RIGHTS,
                VMCS_GUEST_FS_LIMIT,
                VMCS_GUEST_FS_BASE
        },
        [NVMM_X64_SEG_GS] = {
                VMCS_GUEST_GS_SELECTOR,
                VMCS_GUEST_GS_ACCESS_RIGHTS,
                VMCS_GUEST_GS_LIMIT,
                VMCS_GUEST_GS_BASE
        },
        [NVMM_X64_SEG_GDT] = {
                0, /* doesn't exist */
                0, /* doesn't exist */
                VMCS_GUEST_GDTR_LIMIT,
                VMCS_GUEST_GDTR_BASE
        },
        [NVMM_X64_SEG_IDT] = {
                0, /* doesn't exist */
                0, /* doesn't exist */
                VMCS_GUEST_IDTR_LIMIT,
                VMCS_GUEST_IDTR_BASE
        },
        [NVMM_X64_SEG_LDT] = {
                VMCS_GUEST_LDTR_SELECTOR,
                VMCS_GUEST_LDTR_ACCESS_RIGHTS,
                VMCS_GUEST_LDTR_LIMIT,
                VMCS_GUEST_LDTR_BASE
        },
        [NVMM_X64_SEG_TR] = {
                VMCS_GUEST_TR_SELECTOR,
                VMCS_GUEST_TR_ACCESS_RIGHTS,
                VMCS_GUEST_TR_LIMIT,
                VMCS_GUEST_TR_BASE
        }
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
        uint64_t msr;

        msr = rdmsr(MSR_IA32_VMX_BASIC);
        msr &= IA32_VMX_BASIC_IDENT;

        return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
        paddr_t vmcs_pa = (paddr_t)arg1;
        vmx_vmclear(&vmcs_pa);
}

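/*
 * Clear a VMCS that is current on a remote CPU, via a high-priority
 * cross-call. xc_wait() can sleep, so preemption must be re-enabled;
 * bind the lwp first so we come back on the same physical CPU with the
 * preemption state we entered with.
 */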
static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
        uint64_t xc;
        int bound;

        KASSERT(kpreempt_disabled());

        bound = curlwp_bind();
        kpreempt_enable();

        xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
        xc_wait(xc);

        kpreempt_disable();
        curlwp_bindx(bound);
}

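/*
 * Make the VCPU's VMCS current on this CPU, clearing it first if it was
 * last active elsewhere (a VMCS may be current on only one CPU at a time,
 * and clearing it forces a relaunch). Calls nest: only the outermost
 * enter/leave pair performs the actual VMPTRLD; leave keeps the VMCS
 * loaded and records on which CPU it was left.
 */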
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        struct cpu_info *vmcs_ci;
        paddr_t oldpa __diagused;

        cpudata->vmcs_refcnt++;
        if (cpudata->vmcs_refcnt > 1) {
#ifdef DIAGNOSTIC
                KASSERT(kpreempt_disabled());
                oldpa = vmx_vmptrst();
                KASSERT(oldpa == cpudata->vmcs_pa);
#endif
                return;
        }

        vmcs_ci = cpudata->vmcs_ci;
        cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

        kpreempt_disable();

        if (vmcs_ci == NULL) {
                /* This VMCS is loaded for the first time. */
                vmx_vmclear(&cpudata->vmcs_pa);
                cpudata->vmcs_launched = false;
        } else if (vmcs_ci != curcpu()) {
                /* This VMCS is active on a remote CPU. */
                vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
                cpudata->vmcs_launched = false;
        } else {
                /* This VMCS is active on curcpu, nothing to do. */
        }

        vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        KASSERT(kpreempt_disabled());
#ifdef DIAGNOSTIC
        KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
#endif
        KASSERT(cpudata->vmcs_refcnt > 0);
        cpudata->vmcs_refcnt--;

        if (cpudata->vmcs_refcnt > 0) {
                return;
        }

        cpudata->vmcs_ci = curcpu();
        kpreempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        KASSERT(kpreempt_disabled());
#ifdef DIAGNOSTIC
        KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
#endif
        KASSERT(cpudata->vmcs_refcnt == 1);
        cpudata->vmcs_refcnt--;

        vmx_vmclear(&cpudata->vmcs_pa);
        kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

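/*
 * Interrupt/NMI window exiting: when an event cannot be injected because
 * the guest is not currently interruptible, request a VM-exit as soon as
 * an injection window opens, so the event can be delivered then.
 */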
static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t ctls1;

        ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

        if (nmi) {
                // XXX INT_STATE_NMI?
                ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
                cpudata->nmi_window_exit = true;
        } else {
                ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
                cpudata->int_window_exit = true;
        }

        vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t ctls1;

        ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

        if (nmi) {
                ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
                cpudata->nmi_window_exit = false;
        } else {
                ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
                cpudata->int_window_exit = false;
        }

        vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline int
vmx_event_has_error(uint8_t vector)
{
        switch (vector) {
        case 8:  /* #DF */
        case 10: /* #TS */
        case 11: /* #NP */
        case 12: /* #SS */
        case 13: /* #GP */
        case 14: /* #PF */
        case 17: /* #AC */
        case 30: /* #SX */
                return 1;
        default:
                return 0;
        }
}

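/*
 * Inject the event described in the comm page into the guest, by filling
 * the VM-entry interruption-information field. NMIs additionally enable
 * NMI-window exiting; hardware exceptions carry an error code where the
 * architecture defines one.
 */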
static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
        struct nvmm_comm_page *comm = vcpu->comm;
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        int type = 0, err = 0, ret = EINVAL;
        u_int evtype;
        uint8_t vector;
        uint64_t info, error;

        evtype = comm->event.type;
        vector = comm->event.vector;
        error = comm->event.u.excp.error;
        __insn_barrier();

        vmx_vmcs_enter(vcpu);

        switch (evtype) {
        case NVMM_VCPU_EVENT_EXCP:
                if (vector == 2 || vector >= 32)
                        goto out;
                if (vector == 3 || vector == 0)
                        goto out;
                type = INTR_TYPE_HW_EXC;
                err = vmx_event_has_error(vector);
                break;
        case NVMM_VCPU_EVENT_INTR:
                type = INTR_TYPE_EXT_INT;
                if (vector == 2) {
                        type = INTR_TYPE_NMI;
                        vmx_event_waitexit_enable(vcpu, true);
                }
                err = 0;
                break;
        default:
                goto out;
        }

        info =
            __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
            __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
            __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
            __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
        vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
        vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

        cpudata->evt_pending = true;
        ret = 0;

out:
        vmx_vmcs_leave(vcpu);
        return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
        struct nvmm_comm_page *comm = vcpu->comm;
        int ret __diagused;

        comm->event.type = NVMM_VCPU_EVENT_EXCP;
        comm->event.vector = 6;
        comm->event.u.excp.error = 0;

        ret = vmx_vcpu_inject(vcpu);
        KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
        struct nvmm_comm_page *comm = vcpu->comm;
        int ret __diagused;

        comm->event.type = NVMM_VCPU_EVENT_EXCP;
        comm->event.vector = 13;
        comm->event.u.excp.error = 0;

        ret = vmx_vcpu_inject(vcpu);
        KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
        if (__predict_true(!vcpu->comm->event_commit)) {
                return 0;
        }
        vcpu->comm->event_commit = false;
        return vmx_vcpu_inject(vcpu);
}

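/*
 * Skip over the instruction that caused the current exit, and clear the
 * STI/MOV-SS interruptibility blocking, which would otherwise be stale.
 */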
static inline void
vmx_inkernel_advance(void)
{
        uint64_t rip, inslen, intstate;

        /*
         * Maybe we should also apply single-stepping and debug exceptions.
         * Matters for guest-ring3, because it can execute 'cpuid' under a
         * debugger.
         */
        inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
        rip = vmx_vmread(VMCS_GUEST_RIP);
        vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
        intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
        vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
            intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
        exit->u.inv.hwcode = code;
        exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        uint64_t qual;

        qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

        if ((qual & INTR_INFO_VALID) == 0) {
                goto error;
        }
        if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
                goto error;
        }

        exit->reason = NVMM_VCPU_EXIT_NONE;
        return;

error:
        vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

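/*
 * In-kernel CPUID filtering: restrict the host's CPUID output to the
 * features NVMM exposes, overlay the virtual APIC ID and topology, and
 * advertise the "___ NVMM ___" hypervisor signature on leaf 0x40000000.
 */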
static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t eax, uint64_t ecx)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        unsigned int ncpus;
        uint64_t cr4;

        switch (eax) {
        case 0x00000001:
                cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

                cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
                cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
                    CPUID_LOCAL_APIC_ID);

                cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
                cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
                if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
                        cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
                }

                cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

                /* CPUID2_OSXSAVE depends on CR4. */
                cr4 = vmx_vmread(VMCS_GUEST_CR4);
                if (!(cr4 & CR4_OSXSAVE)) {
                        cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
                }
                break;
        case 0x00000005:
        case 0x00000006:
                cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                break;
        case 0x00000007:
                cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000007.eax;
                cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
                cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
                cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
                if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
                        cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
                }
                break;
        case 0x0000000A:
                cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                break;
        case 0x0000000B:
                switch (ecx) {
                case 0: /* Threads */
                        cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RCX] =
                            __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
                            __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
                        cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
                        break;
                case 1: /* Cores */
                        ncpus = atomic_load_relaxed(&mach->ncpus);
                        cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
                        cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
                        cpudata->gprs[NVMM_X64_GPR_RCX] =
                            __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
                            __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
                        cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
                        break;
                default:
                        cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
                        cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                        break;
                }
                break;
        case 0x0000000D:
                if (vmx_xcr0_mask == 0) {
                        break;
                }
                switch (ecx) {
                case 0:
                        cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
                        if (cpudata->gxcr0 & XCR0_SSE) {
                                cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
                        } else {
                                cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
                        }
                        cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
                        cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
                        cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
                        break;
                case 1:
                        cpudata->gprs[NVMM_X64_GPR_RAX] &=
                            (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
                             CPUID_PES1_XGETBV);
                        cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                        break;
                default:
                        cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
                        cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                        break;
                }
                break;
        case 0x40000000:
                cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
                cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
                memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
                memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
                memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
                break;
        case 0x80000001:
                cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
                cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
                cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
                cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
                break;
        default:
                break;
        }
}

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
        uint64_t inslen, rip;

        inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
        rip = vmx_vmread(VMCS_GUEST_RIP);
        exit->u.insn.npc = rip + inslen;
        exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        struct nvmm_vcpu_conf_cpuid *cpuid;
        uint64_t eax, ecx;
        u_int descs[4];
        size_t i;

        eax = cpudata->gprs[NVMM_X64_GPR_RAX];
        ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
        x86_cpuid2(eax, ecx, descs);

        cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
        cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
        cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
        cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

        vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

        for (i = 0; i < VMX_NCPUIDS; i++) {
                if (!cpudata->cpuidpresent[i]) {
                        continue;
                }
                cpuid = &cpudata->cpuid[i];
                if (cpuid->leaf != eax) {
                        continue;
                }

                if (cpuid->exit) {
                        vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
                        return;
                }
                KASSERT(cpuid->mask);

                /* del */
                cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
                cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
                cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
                cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

                /* set */
                cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
                cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
                cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
                cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

                break;
        }

        vmx_inkernel_advance();
        exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t rflags;

        if (cpudata->int_window_exit) {
                rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
                if (rflags & PSL_I) {
                        vmx_event_waitexit_disable(vcpu, false);
                }
        }

        vmx_inkernel_advance();
        exit->reason = NVMM_VCPU_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM __BITS(3,0)
#define VMX_QUAL_CR_TYPE __BITS(5,4)
#define CR_TYPE_WRITE 0
#define CR_TYPE_READ 1
#define CR_TYPE_CLTS 2
#define CR_TYPE_LMSW 3
#define VMX_QUAL_CR_LMSW_OPMEM __BIT(6)
#define VMX_QUAL_CR_GPR __BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC __BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
        /* Bits set to 1 in fixed0 are fixed to 1. */
        if ((crval & fixed0) != fixed0) {
                return -1;
        }
        /* Bits set to 0 in fixed1 are fixed to 0. */
        if (crval & ~fixed1) {
                return -1;
        }
        return 0;
}

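/*
 * Emulate a write to CR0. NE/ET are forced to 1 and NW/CD to 0, the
 * result is validated against the VMX fixed bits, and EFER.LMA plus the
 * "IA-32e mode guest" entry control are kept in sync when paging gets
 * enabled.
 */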
static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t type, gpr, cr0;
        uint64_t efer, ctls1;

        type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
        if (type != CR_TYPE_WRITE) {
                return -1;
        }

        gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
        KASSERT(gpr < 16);

        if (gpr == NVMM_X64_GPR_RSP) {
                gpr = vmx_vmread(VMCS_GUEST_RSP);
        } else {
                gpr = cpudata->gprs[gpr];
        }

        cr0 = gpr | CR0_NE | CR0_ET;
        cr0 &= ~(CR0_NW|CR0_CD);

        if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
                return -1;
        }

        /*
         * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
         * from CR3.
         */

        if (cr0 & CR0_PG) {
                ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
                efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
                if (efer & EFER_LME) {
                        ctls1 |= ENTRY_CTLS_LONG_MODE;
                        efer |= EFER_LMA;
                } else {
                        ctls1 &= ~ENTRY_CTLS_LONG_MODE;
                        efer &= ~EFER_LMA;
                }
                vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
                vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
        }

        vmx_vmwrite(VMCS_GUEST_CR0, cr0);
        vmx_inkernel_advance();
        return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t type, gpr, cr4;

        type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
        if (type != CR_TYPE_WRITE) {
                return -1;
        }

        gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
        KASSERT(gpr < 16);

        if (gpr == NVMM_X64_GPR_RSP) {
                gpr = vmx_vmread(VMCS_GUEST_RSP);
        } else {
                gpr = cpudata->gprs[gpr];
        }

        cr4 = gpr | CR4_VMXE;

        if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
                return -1;
        }

        vmx_vmwrite(VMCS_GUEST_CR4, cr4);
        vmx_inkernel_advance();
        return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t type, gpr;
        bool write;

        type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
        if (type == CR_TYPE_WRITE) {
                write = true;
        } else if (type == CR_TYPE_READ) {
                write = false;
        } else {
                return -1;
        }

        gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
        KASSERT(gpr < 16);

        if (write) {
                if (gpr == NVMM_X64_GPR_RSP) {
                        cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
                } else {
                        cpudata->gcr8 = cpudata->gprs[gpr];
                }
                if (cpudata->tpr.exit_changed) {
                        exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
                }
        } else {
                if (gpr == NVMM_X64_GPR_RSP) {
                        vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
                } else {
                        cpudata->gprs[gpr] = cpudata->gcr8;
                }
        }

        vmx_inkernel_advance();
        return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        uint64_t qual;
        int ret;

        exit->reason = NVMM_VCPU_EXIT_NONE;

        qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

        switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
        case 0:
                ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
                break;
        case 4:
                ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
                break;
        case 8:
                ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
                break;
        default:
                ret = -1;
                break;
        }

        if (ret == -1) {
                vmx_inject_gp(vcpu);
        }
}

#define VMX_QUAL_IO_SIZE __BITS(2,0)
#define IO_SIZE_8 0
#define IO_SIZE_16 1
#define IO_SIZE_32 3
#define VMX_QUAL_IO_IN __BIT(3)
#define VMX_QUAL_IO_STR __BIT(4)
#define VMX_QUAL_IO_REP __BIT(5)
#define VMX_QUAL_IO_DX __BIT(6)
#define VMX_QUAL_IO_PORT __BITS(31,16)

#define VMX_INFO_IO_ADRSIZE __BITS(9,7)
#define IO_ADRSIZE_16 0
#define IO_ADRSIZE_32 1
#define IO_ADRSIZE_64 2
#define VMX_INFO_IO_SEG __BITS(17,15)

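/*
 * Decode an I/O exit from the exit qualification and instruction
 * information fields: direction, port, operand/address size, and the
 * segment (forced to ES for INS, as architecturally defined).
 */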
static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        uint64_t qual, info, inslen, rip;

        qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
        info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

        exit->reason = NVMM_VCPU_EXIT_IO;

        exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
        exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

        KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
        exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

        if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
                exit->u.io.address_size = 8;
        } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
                exit->u.io.address_size = 4;
        } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
                exit->u.io.address_size = 2;
        }

        if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
                exit->u.io.operand_size = 4;
        } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
                exit->u.io.operand_size = 2;
        } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
                exit->u.io.operand_size = 1;
        }

        exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
        exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

        if (exit->u.io.in && exit->u.io.str) {
                exit->u.io.seg = NVMM_X64_SEG_ES;
        }

        inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
        rip = vmx_vmread(VMCS_GUEST_RIP);
        exit->u.io.npc = rip + inslen;

        vmx_vcpu_state_provide(vcpu,
            NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
            NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static const uint64_t msr_ignore_list[] = {
        MSR_BIOS_SIGN,
        MSR_IA32_PLATFORM_ID
};

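/*
 * Handle the RDMSR/WRMSR cases NVMM emulates in the kernel: PAT via the
 * VMCS, TSC writes via the TSC offset, MISC_ENABLE from a shadow value,
 * and a small list of MSRs that read as zero and ignore writes. Returns
 * false if the access must be forwarded to userland.
 */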
static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t val;
        size_t i;

        if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
                if (exit->u.rdmsr.msr == MSR_CR_PAT) {
                        val = vmx_vmread(VMCS_GUEST_IA32_PAT);
                        cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
                        goto handled;
                }
                if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
                        val = cpudata->gmsr_misc_enable;
                        cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
                        goto handled;
                }
                for (i = 0; i < __arraycount(msr_ignore_list); i++) {
                        if (msr_ignore_list[i] != exit->u.rdmsr.msr)
                                continue;
                        val = 0;
                        cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
                        goto handled;
                }
        } else {
                if (exit->u.wrmsr.msr == MSR_TSC) {
                        cpudata->gtsc = exit->u.wrmsr.val;
                        cpudata->gtsc_want_update = true;
                        goto handled;
                }
                if (exit->u.wrmsr.msr == MSR_CR_PAT) {
                        val = exit->u.wrmsr.val;
                        if (__predict_false(!nvmm_x86_pat_validate(val))) {
                                goto error;
                        }
                        vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
                        goto handled;
                }
                if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
                        /* Don't care. */
                        goto handled;
                }
                for (i = 0; i < __arraycount(msr_ignore_list); i++) {
                        if (msr_ignore_list[i] != exit->u.wrmsr.msr)
                                continue;
                        goto handled;
                }
        }

        return false;

handled:
        vmx_inkernel_advance();
        return true;

error:
        vmx_inject_gp(vcpu);
        return true;
}

static void
vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t inslen, rip;

        exit->reason = NVMM_VCPU_EXIT_RDMSR;
        exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

        if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
                exit->reason = NVMM_VCPU_EXIT_NONE;
                return;
        }

        inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
        rip = vmx_vmread(VMCS_GUEST_RIP);
        exit->u.rdmsr.npc = rip + inslen;

        vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t rdx, rax, inslen, rip;

        rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
        rax = cpudata->gprs[NVMM_X64_GPR_RAX];

        exit->reason = NVMM_VCPU_EXIT_WRMSR;
        exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
        exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);

        if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
                exit->reason = NVMM_VCPU_EXIT_NONE;
                return;
        }

        inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
        rip = vmx_vmread(VMCS_GUEST_RIP);
        exit->u.wrmsr.npc = rip + inslen;

        vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;
        uint64_t val;

        exit->reason = NVMM_VCPU_EXIT_NONE;

        val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
            (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);

        if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
                goto error;
        } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
                goto error;
        } else if (__predict_false((val & XCR0_X87) == 0)) {
                goto error;
        }

        cpudata->gxcr0 = val;

        vmx_inkernel_advance();
        return;

error:
        vmx_inject_gp(vcpu);
}

#define VMX_EPT_VIOLATION_READ __BIT(0)
#define VMX_EPT_VIOLATION_WRITE __BIT(1)
#define VMX_EPT_VIOLATION_EXECUTE __BIT(2)

static void
vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
        uint64_t perm;
        gpaddr_t gpa;

        gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);

        exit->reason = NVMM_VCPU_EXIT_MEMORY;
        perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
        if (perm & VMX_EPT_VIOLATION_WRITE)
                exit->u.mem.prot = PROT_WRITE;
        else if (perm & VMX_EPT_VIOLATION_EXECUTE)
                exit->u.mem.prot = PROT_EXEC;
        else
                exit->u.mem.prot = PROT_READ;
        exit->u.mem.gpa = gpa;
        exit->u.mem.inst_len = 0;

        vmx_vcpu_state_provide(vcpu,
            NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
            NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

/* -------------------------------------------------------------------------- */

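/*
 * Swap the host FPU state out and the guest state in, along with XCR0.
 * The host's CR0.TS setting is remembered on enter and re-set on leave,
 * so the host's FPU bookkeeping is preserved.
 */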
static void
vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        cpudata->ts_set = (rcr0() & CR0_TS) != 0;

        fpu_area_save(&cpudata->hfpu, vmx_xcr0_mask);
        fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);

        if (vmx_xcr0_mask != 0) {
                cpudata->hxcr0 = rdxcr(0);
                wrxcr(0, cpudata->gxcr0);
        }
}

static void
vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        if (vmx_xcr0_mask != 0) {
                cpudata->gxcr0 = rdxcr(0);
                wrxcr(0, cpudata->hxcr0);
        }

        fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
        fpu_area_restore(&cpudata->hfpu, vmx_xcr0_mask);

        if (cpudata->ts_set) {
                stts();
        }
}

static void
vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        x86_dbregs_save(curlwp);

        ldr7(0);

        ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
        ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
        ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
        ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
        ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
}

static void
vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
        cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
        cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
        cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
        cpudata->drs[NVMM_X64_DR_DR6] = rdr6();

        x86_dbregs_restore(curlwp);
}

static void
vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        /* This gets restored automatically by the CPU. */
        vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
        vmx_vmwrite(VMCS_HOST_CR3, rcr3());
        vmx_vmwrite(VMCS_HOST_CR4, rcr4());

        cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
        struct vmx_cpudata *cpudata = vcpu->cpudata;

        wrmsr(MSR_STAR, cpudata->star);
        wrmsr(MSR_LSTAR, cpudata->lstar);
        wrmsr(MSR_CSTAR, cpudata->cstar);
        wrmsr(MSR_SFMASK, cpudata->sfmask);
        wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
}

/* -------------------------------------------------------------------------- */

#define VMX_INVVPID_ADDRESS 0
#define VMX_INVVPID_CONTEXT 1
#define VMX_INVVPID_ALL 2
#define VMX_INVVPID_CONTEXT_NOGLOBAL 3

#define VMX_INVEPT_CONTEXT 1
#define VMX_INVEPT_ALL 2

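/*
 * TLB maintenance. Two translation caches are involved: the guest TLB
 * (VPID-tagged linear mappings) and the host TLB (EPT mappings). The
 * guest TLB is flushed with INVVPID when the VCPU migrates between
 * physical CPUs; the host TLB is flushed with INVEPT on each CPU,
 * tracked by a machine-wide generation number plus a kcpuset of the
 * CPUs that still owe a flush.
 */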
1877 static inline void
1878 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
1879 {
1880 struct vmx_cpudata *cpudata = vcpu->cpudata;
1881
1882 if (vcpu->hcpu_last != hcpu) {
1883 cpudata->gtlb_want_flush = true;
1884 }
1885 }
1886
1887 static inline void
1888 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
1889 {
1890 struct vmx_cpudata *cpudata = vcpu->cpudata;
1891 struct ept_desc ept_desc;
1892
1893 if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
1894 return;
1895 }
1896
1897 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
1898 ept_desc.mbz = 0;
1899 vmx_invept(vmx_ept_flush_op, &ept_desc);
1900 kcpuset_clear(cpudata->htlb_want_flush, hcpu);
1901 }
1902
1903 static inline uint64_t
1904 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
1905 {
1906 struct ept_desc ept_desc;
1907 uint64_t machgen;
1908
1909 machgen = machdata->mach_htlb_gen;
1910 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
1911 return machgen;
1912 }
1913
1914 kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
1915
1916 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
1917 ept_desc.mbz = 0;
1918 vmx_invept(vmx_ept_flush_op, &ept_desc);
1919
1920 return machgen;
1921 }
1922
1923 static inline void
1924 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
1925 {
1926 cpudata->vcpu_htlb_gen = machgen;
1927 kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
1928 }
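
/*
 * A sketch of the host-TLB (EPT) flush protocol implemented by the three
 * helpers above, assuming mach_htlb_gen only ever moves forward (it is
 * bumped by vmx_tlb_flush(), further below):
 *
 *	1. A host pmap update increments machdata->mach_htlb_gen.
 *	2. Before the next VM entry, vmx_htlb_flush() sees vcpu_htlb_gen
 *	   lagging behind, marks every running CPU in htlb_want_flush,
 *	   and performs an INVEPT on the local CPU.
 *	3. vmx_htlb_flush_ack() records the new generation and clears the
 *	   local CPU from the kcpuset, the local flush being done.
 *	4. If the VCPU later migrates to a CPU still marked in the
 *	   kcpuset, vmx_htlb_catchup() INVEPTs there before entering the
 *	   guest.
 */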
1929
1930 static inline void
1931 vmx_exit_evt(struct vmx_cpudata *cpudata)
1932 {
1933 uint64_t info, err, inslen;
1934
1935 cpudata->evt_pending = false;
1936
1937 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
1938 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
1939 return;
1940 }
1941 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
1942
1943 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
1944 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
1945
1946 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
1947 case INTR_TYPE_SW_INT:
1948 case INTR_TYPE_PRIV_SW_EXC:
1949 case INTR_TYPE_SW_EXC:
1950 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1951 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
1952 }
1953
1954 cpudata->evt_pending = true;
1955 }
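
/*
 * Note on vmx_exit_evt(): if the #VMEXIT interrupted the delivery of an
 * event, the IDT-vectoring fields describe that event, and copying them
 * into the entry fields re-injects it on the next VM entry. For software
 * interrupts and software exceptions the instruction length is
 * re-supplied too, since the CPU needs it to compute the return RIP to
 * push.
 */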
1956
1957 static int
1958 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1959 struct nvmm_vcpu_exit *exit)
1960 {
1961 struct nvmm_comm_page *comm = vcpu->comm;
1962 struct vmx_machdata *machdata = mach->machdata;
1963 struct vmx_cpudata *cpudata = vcpu->cpudata;
1964 struct vpid_desc vpid_desc;
1965 struct cpu_info *ci;
1966 uint64_t exitcode;
1967 uint64_t intstate;
1968 uint64_t machgen;
1969 int hcpu, s, ret;
1970 bool launched;
1971
1972 vmx_vmcs_enter(vcpu);
1973
1974 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
1975 vmx_vmcs_leave(vcpu);
1976 return EINVAL;
1977 }
1978 vmx_vcpu_state_commit(vcpu);
1979 comm->state_cached = 0;
1980
1981 ci = curcpu();
1982 hcpu = cpu_number();
1983 launched = cpudata->vmcs_launched;
1984
1985 vmx_gtlb_catchup(vcpu, hcpu);
1986 vmx_htlb_catchup(vcpu, hcpu);
1987
1988 if (vcpu->hcpu_last != hcpu) {
1989 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
1990 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
1991 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
1992 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
1993 cpudata->gtsc_want_update = true;
1994 vcpu->hcpu_last = hcpu;
1995 }
1996
1997 vmx_vcpu_guest_dbregs_enter(vcpu);
1998 vmx_vcpu_guest_misc_enter(vcpu);
1999
2000 while (1) {
2001 if (cpudata->gtlb_want_flush) {
2002 vpid_desc.vpid = cpudata->asid;
2003 vpid_desc.addr = 0;
2004 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2005 cpudata->gtlb_want_flush = false;
2006 }
2007
2008 if (__predict_false(cpudata->gtsc_want_update)) {
2009 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
2010 cpudata->gtsc_want_update = false;
2011 }
2012
2013 s = splhigh();
2014 machgen = vmx_htlb_flush(machdata, cpudata);
2015 vmx_vcpu_guest_fpu_enter(vcpu);
2016 lcr2(cpudata->gcr2);
2017 if (launched) {
2018 ret = vmx_vmresume(cpudata->gprs);
2019 } else {
2020 ret = vmx_vmlaunch(cpudata->gprs);
2021 }
2022 cpudata->gcr2 = rcr2();
2023 vmx_vcpu_guest_fpu_leave(vcpu);
2024 vmx_htlb_flush_ack(cpudata, machgen);
2025 splx(s);
2026
2027 if (__predict_false(ret != 0)) {
2028 vmx_exit_invalid(exit, -1);
2029 break;
2030 }
2031 vmx_exit_evt(cpudata);
2032
2033 launched = true;
2034
2035 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2036 exitcode &= __BITS(15,0);
2037
2038 switch (exitcode) {
2039 case VMCS_EXITCODE_EXC_NMI:
2040 vmx_exit_exc_nmi(mach, vcpu, exit);
2041 break;
2042 case VMCS_EXITCODE_EXT_INT:
2043 exit->reason = NVMM_VCPU_EXIT_NONE;
2044 break;
2045 case VMCS_EXITCODE_CPUID:
2046 vmx_exit_cpuid(mach, vcpu, exit);
2047 break;
2048 case VMCS_EXITCODE_HLT:
2049 vmx_exit_hlt(mach, vcpu, exit);
2050 break;
2051 case VMCS_EXITCODE_CR:
2052 vmx_exit_cr(mach, vcpu, exit);
2053 break;
2054 case VMCS_EXITCODE_IO:
2055 vmx_exit_io(mach, vcpu, exit);
2056 break;
2057 case VMCS_EXITCODE_RDMSR:
2058 vmx_exit_rdmsr(mach, vcpu, exit);
2059 break;
2060 case VMCS_EXITCODE_WRMSR:
2061 vmx_exit_wrmsr(mach, vcpu, exit);
2062 break;
2063 case VMCS_EXITCODE_SHUTDOWN:
2064 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2065 break;
2066 case VMCS_EXITCODE_MONITOR:
2067 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2068 break;
2069 case VMCS_EXITCODE_MWAIT:
2070 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2071 break;
2072 case VMCS_EXITCODE_XSETBV:
2073 vmx_exit_xsetbv(mach, vcpu, exit);
2074 break;
2075 case VMCS_EXITCODE_RDPMC:
2076 case VMCS_EXITCODE_RDTSCP:
2077 case VMCS_EXITCODE_INVVPID:
2078 case VMCS_EXITCODE_INVEPT:
2079 case VMCS_EXITCODE_VMCALL:
2080 case VMCS_EXITCODE_VMCLEAR:
2081 case VMCS_EXITCODE_VMLAUNCH:
2082 case VMCS_EXITCODE_VMPTRLD:
2083 case VMCS_EXITCODE_VMPTRST:
2084 case VMCS_EXITCODE_VMREAD:
2085 case VMCS_EXITCODE_VMRESUME:
2086 case VMCS_EXITCODE_VMWRITE:
2087 case VMCS_EXITCODE_VMXOFF:
2088 case VMCS_EXITCODE_VMXON:
2089 vmx_inject_ud(vcpu);
2090 exit->reason = NVMM_VCPU_EXIT_NONE;
2091 break;
2092 case VMCS_EXITCODE_EPT_VIOLATION:
2093 vmx_exit_epf(mach, vcpu, exit);
2094 break;
2095 case VMCS_EXITCODE_INT_WINDOW:
2096 vmx_event_waitexit_disable(vcpu, false);
2097 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2098 break;
2099 case VMCS_EXITCODE_NMI_WINDOW:
2100 vmx_event_waitexit_disable(vcpu, true);
2101 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2102 break;
2103 default:
2104 vmx_exit_invalid(exit, exitcode);
2105 break;
2106 }
2107
2108 /* If no reason to return to userland, keep rolling. */
2109 if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
2110 break;
2111 }
2112 if (curcpu()->ci_data.cpu_softints != 0) {
2113 break;
2114 }
2115 if (curlwp->l_flag & LW_USERRET) {
2116 break;
2117 }
2118 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2119 break;
2120 }
2121 }
2122
2123 cpudata->vmcs_launched = launched;
2124
2125 cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
2126
2127 vmx_vcpu_guest_misc_leave(vcpu);
2128 vmx_vcpu_guest_dbregs_leave(vcpu);
2129
2130 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2131 exit->exitstate.cr8 = cpudata->gcr8;
2132 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2133 exit->exitstate.int_shadow =
2134 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2135 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2136 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2137 exit->exitstate.evt_pending = cpudata->evt_pending;
2138
2139 vmx_vmcs_leave(vcpu);
2140
2141 return 0;
2142 }
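
/*
 * Contract of vmx_vcpu_run(), as a sketch: commit any pending event and
 * any dirty state, then loop on VM entries until either the guest takes
 * an exit that userland must handle (exit->reason != NVMM_VCPU_EXIT_NONE)
 * or the host needs the CPU back (preemption, softints, userret work).
 * Both cases return 0 with the exit state filled in; EINVAL is returned
 * only when the pending event cannot be committed.
 */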
2143
2144 /* -------------------------------------------------------------------------- */
2145
2146 static int
2147 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
2148 {
2149 struct pglist pglist;
2150 paddr_t _pa;
2151 vaddr_t _va;
2152 size_t i;
2153 int ret;
2154
2155 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
2156 &pglist, 1, 0);
2157 if (ret != 0)
2158 return ENOMEM;
2159 _pa = TAILQ_FIRST(&pglist)->phys_addr;
2160 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
2161 UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
2162 if (_va == 0)
2163 goto error;
2164
2165 for (i = 0; i < npages; i++) {
2166 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
2167 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
2168 }
2169 pmap_update(pmap_kernel());
2170
2171 memset((void *)_va, 0, npages * PAGE_SIZE);
2172
2173 *pa = _pa;
2174 *va = _va;
2175 return 0;
2176
2177 error:
2178 for (i = 0; i < npages; i++) {
2179 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
2180 }
2181 return ENOMEM;
2182 }
2183
2184 static void
2185 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
2186 {
2187 size_t i;
2188
2189 pmap_kremove(va, npages * PAGE_SIZE);
2190 pmap_update(pmap_kernel());
2191 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
2192 for (i = 0; i < npages; i++) {
2193 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
2194 }
2195 }
2196
2197 /* -------------------------------------------------------------------------- */
2198
2199 static void
2200 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2201 {
2202 uint64_t byte;
2203 uint8_t bitoff;
2204
2205 if (msr < 0x00002000) {
2206 /* Range 1 */
2207 byte = ((msr - 0x00000000) / 8) + 0;
2208 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2209 /* Range 2 */
2210 byte = ((msr - 0xC0000000) / 8) + 1024;
2211 } else {
2212 panic("%s: wrong range", __func__);
2213 }
2214
2215 bitoff = (msr & 0x7);
2216
2217 if (read) {
2218 bitmap[byte] &= ~__BIT(bitoff);
2219 }
2220 if (write) {
2221 bitmap[2048 + byte] &= ~__BIT(bitoff);
2222 }
2223 }
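
/*
 * Worked example for vmx_vcpu_msr_allow(), assuming the standard VMX
 * MSR-bitmap layout (1KB read-low, 1KB read-high, 1KB write-low, 1KB
 * write-high, where a set bit means "intercept"). For MSR_LSTAR
 * (0xC0000082):
 *
 *	byte   = ((0xC0000082 - 0xC0000000) / 8) + 1024 = 16 + 1024 = 1040
 *	bitoff = (0xC0000082 & 0x7) = 2
 *
 * Allowing reads clears bit 2 of bitmap[1040]; allowing writes clears
 * bit 2 of bitmap[2048 + 1040].
 */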
2224
2225 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2226 #define VMX_SEG_ATTRIB_S __BIT(4)
2227 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2228 #define VMX_SEG_ATTRIB_P __BIT(7)
2229 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2230 #define VMX_SEG_ATTRIB_L __BIT(13)
2231 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2232 #define VMX_SEG_ATTRIB_G __BIT(15)
2233 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2234
2235 static void
2236 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2237 {
2238 uint64_t attrib;
2239
2240 attrib =
2241 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2242 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2243 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2244 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2245 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2246 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2247 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2248 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2249 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2250
2251 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2252 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2253 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2254 }
2255 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2256 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2257 }
2258
2259 static void
2260 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2261 {
2262 uint64_t selector = 0, attrib = 0, base, limit;
2263
2264 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2265 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2266 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2267 }
2268 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2269 base = vmx_vmread(vmx_guest_segs[idx].base);
2270
2271 segs[idx].selector = selector;
2272 segs[idx].limit = limit;
2273 segs[idx].base = base;
2274 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2275 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2276 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2277 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2278 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2279 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2280 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2281 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2282 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2283 segs[idx].attrib.p = 0;
2284 }
2285 }
2286
2287 static inline bool
2288 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2289 {
2290 uint64_t cr0, cr3, cr4, efer;
2291
2292 if (flags & NVMM_X64_STATE_CRS) {
2293 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2294 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2295 return true;
2296 }
2297 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2298 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2299 return true;
2300 }
2301 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2302 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2303 return true;
2304 }
2305 }
2306
2307 if (flags & NVMM_X64_STATE_MSRS) {
2308 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2309 if ((efer ^
2310 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2311 return true;
2312 }
2313 }
2314
2315 return false;
2316 }
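
/*
 * Rationale, as a sketch: the guest TLB is VPID-tagged and caches
 * translations derived from the guest's CR3 together with the paging
 * bits of CR0/CR4 and EFER. When userland rewrites any of those behind
 * the guest's back, the cached translations may be stale, so
 * vmx_vcpu_setstate() below sets gtlb_want_flush and the run loop
 * performs an INVVPID before the next VM entry.
 */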
2317
2318 static void
2319 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2320 {
2321 struct nvmm_comm_page *comm = vcpu->comm;
2322 const struct nvmm_x64_state *state = &comm->state;
2323 struct vmx_cpudata *cpudata = vcpu->cpudata;
2324 struct fxsave *fpustate;
2325 uint64_t ctls1, intstate;
2326 uint64_t flags;
2327
2328 flags = comm->state_wanted;
2329
2330 vmx_vmcs_enter(vcpu);
2331
2332 if (vmx_state_tlb_flush(state, flags)) {
2333 cpudata->gtlb_want_flush = true;
2334 }
2335
2336 if (flags & NVMM_X64_STATE_SEGS) {
2337 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2338 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2339 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2340 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2341 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2342 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2343 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2344 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2345 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2346 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2347 }
2348
2349 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2350 if (flags & NVMM_X64_STATE_GPRS) {
2351 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2352
2353 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2354 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2355 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2356 }
2357
2358 if (flags & NVMM_X64_STATE_CRS) {
2359 /*
2360 * CR0_NE and CR4_VMXE are mandatory.
2361 */
2362 vmx_vmwrite(VMCS_GUEST_CR0,
2363 state->crs[NVMM_X64_CR_CR0] | CR0_NE);
2364 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2365 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); // XXX PDPTE?
2366 vmx_vmwrite(VMCS_GUEST_CR4,
2367 state->crs[NVMM_X64_CR_CR4] | CR4_VMXE);
2368 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2369
2370 if (vmx_xcr0_mask != 0) {
2371 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2372 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2373 cpudata->gxcr0 &= vmx_xcr0_mask;
2374 cpudata->gxcr0 |= XCR0_X87;
2375 }
2376 }
2377
2378 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2379 if (flags & NVMM_X64_STATE_DRS) {
2380 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2381
2382 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2383 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2384 }
2385
2386 if (flags & NVMM_X64_STATE_MSRS) {
2387 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2388 state->msrs[NVMM_X64_MSR_STAR];
2389 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2390 state->msrs[NVMM_X64_MSR_LSTAR];
2391 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2392 state->msrs[NVMM_X64_MSR_CSTAR];
2393 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2394 state->msrs[NVMM_X64_MSR_SFMASK];
2395 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2396 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2397
2398 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2399 state->msrs[NVMM_X64_MSR_EFER]);
2400 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2401 state->msrs[NVMM_X64_MSR_PAT]);
2402 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2403 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2404 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2405 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2406 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2407 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2408
2409 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
2410 cpudata->gtsc_want_update = true;
2411
2412 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2413 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2414 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2415 ctls1 |= ENTRY_CTLS_LONG_MODE;
2416 } else {
2417 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2418 }
2419 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2420 }
2421
2422 if (flags & NVMM_X64_STATE_INTR) {
2423 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2424 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2425 if (state->intr.int_shadow) {
2426 intstate |= INT_STATE_MOVSS;
2427 }
2428 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2429
2430 if (state->intr.int_window_exiting) {
2431 vmx_event_waitexit_enable(vcpu, false);
2432 } else {
2433 vmx_event_waitexit_disable(vcpu, false);
2434 }
2435
2436 if (state->intr.nmi_window_exiting) {
2437 vmx_event_waitexit_enable(vcpu, true);
2438 } else {
2439 vmx_event_waitexit_disable(vcpu, true);
2440 }
2441 }
2442
2443 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2444 if (flags & NVMM_X64_STATE_FPU) {
2445 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
2446 sizeof(state->fpu));
2447
2448 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
2449 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2450 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2451
2452 if (vmx_xcr0_mask != 0) {
2453 /* Reset XSTATE_BV, to force a reload. */
2454 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2455 }
2456 }
2457
2458 vmx_vmcs_leave(vcpu);
2459
2460 comm->state_wanted = 0;
2461 comm->state_cached |= flags;
2462 }
2463
2464 static void
2465 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2466 {
2467 struct nvmm_comm_page *comm = vcpu->comm;
2468 struct nvmm_x64_state *state = &comm->state;
2469 struct vmx_cpudata *cpudata = vcpu->cpudata;
2470 uint64_t intstate, flags;
2471
2472 flags = comm->state_wanted;
2473
2474 vmx_vmcs_enter(vcpu);
2475
2476 if (flags & NVMM_X64_STATE_SEGS) {
2477 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2478 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2479 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2480 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2481 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2482 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2483 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2484 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2485 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2486 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2487 }
2488
2489 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2490 if (flags & NVMM_X64_STATE_GPRS) {
2491 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2492
2493 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2494 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2495 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2496 }
2497
2498 if (flags & NVMM_X64_STATE_CRS) {
2499 state->crs[NVMM_X64_CR_CR0] = vmx_vmread(VMCS_GUEST_CR0);
2500 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2501 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2502 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2503 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2504 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2505
2506 /* Hide VMXE. */
2507 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2508 }
2509
2510 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2511 if (flags & NVMM_X64_STATE_DRS) {
2512 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2513
2514 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2515 }
2516
2517 if (flags & NVMM_X64_STATE_MSRS) {
2518 state->msrs[NVMM_X64_MSR_STAR] =
2519 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2520 state->msrs[NVMM_X64_MSR_LSTAR] =
2521 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2522 state->msrs[NVMM_X64_MSR_CSTAR] =
2523 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2524 state->msrs[NVMM_X64_MSR_SFMASK] =
2525 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2526 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2527 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2528 state->msrs[NVMM_X64_MSR_EFER] =
2529 vmx_vmread(VMCS_GUEST_IA32_EFER);
2530 state->msrs[NVMM_X64_MSR_PAT] =
2531 vmx_vmread(VMCS_GUEST_IA32_PAT);
2532 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2533 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2534 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2535 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2536 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2537 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2538 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
2539 }
2540
2541 if (flags & NVMM_X64_STATE_INTR) {
2542 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2543 state->intr.int_shadow =
2544 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2545 state->intr.int_window_exiting = cpudata->int_window_exit;
2546 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2547 state->intr.evt_pending = cpudata->evt_pending;
2548 }
2549
2550 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2551 if (flags & NVMM_X64_STATE_FPU) {
2552 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
2553 sizeof(state->fpu));
2554 }
2555
2556 vmx_vmcs_leave(vcpu);
2557
2558 comm->state_wanted = 0;
2559 comm->state_cached |= flags;
2560 }
2561
2562 static void
2563 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2564 {
2565 vcpu->comm->state_wanted = flags;
2566 vmx_vcpu_getstate(vcpu);
2567 }
2568
2569 static void
2570 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2571 {
2572 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2573 vcpu->comm->state_commit = 0;
2574 vmx_vcpu_setstate(vcpu);
2575 }
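
/*
 * Comm-page protocol, illustrated: state_commit is the set of state
 * classes userland has modified, state_wanted the set being synced, and
 * state_cached the set currently valid in the page. The two entry points
 * above thus reduce to:
 *
 *	provide: state_wanted = flags;         getstate() -> cached |= flags
 *	commit:  state_wanted = state_commit;  state_commit = 0; setstate()
 *
 * vmx_vcpu_run() resets state_cached to 0 after committing, since a VM
 * entry can invalidate any of it.
 */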
2576
2577 /* -------------------------------------------------------------------------- */
2578
2579 static void
2580 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2581 {
2582 struct vmx_cpudata *cpudata = vcpu->cpudata;
2583 size_t i, oct, bit;
2584
2585 mutex_enter(&vmx_asidlock);
2586
2587 for (i = 0; i < vmx_maxasid; i++) {
2588 oct = i / 8;
2589 bit = i % 8;
2590
2591 if (vmx_asidmap[oct] & __BIT(bit)) {
2592 continue;
2593 }
2594
2595 cpudata->asid = i;
2596
2597 vmx_asidmap[oct] |= __BIT(bit);
2598 vmx_vmwrite(VMCS_VPID, i);
2599 mutex_exit(&vmx_asidlock);
2600 return;
2601 }
2602
2603 mutex_exit(&vmx_asidlock);
2604
2605 panic("%s: impossible", __func__);
2606 }
2607
2608 static void
2609 vmx_asid_free(struct nvmm_cpu *vcpu)
2610 {
2611 size_t oct, bit;
2612 uint64_t asid;
2613
2614 asid = vmx_vmread(VMCS_VPID);
2615
2616 oct = asid / 8;
2617 bit = asid % 8;
2618
2619 mutex_enter(&vmx_asidlock);
2620 vmx_asidmap[oct] &= ~__BIT(bit);
2621 mutex_exit(&vmx_asidlock);
2622 }
2623
2624 static void
2625 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2626 {
2627 struct vmx_cpudata *cpudata = vcpu->cpudata;
2628 struct vmcs *vmcs = cpudata->vmcs;
2629 struct msr_entry *gmsr = cpudata->gmsr;
2630 extern uint8_t vmx_resume_rip;
2631 uint64_t rev, eptp;
2632
2633 rev = vmx_get_revision();
2634
2635 memset(vmcs, 0, VMCS_SIZE);
2636 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
2637 vmcs->abort = 0;
2638
2639 vmx_vmcs_enter(vcpu);
2640
2641 /* No link pointer. */
2642 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
2643
2644 /* Install the CTLSs. */
2645 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
2646 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
2647 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
2648 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
2649 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
2650
2651 /* Allow direct access to certain MSRs. */
2652 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2653 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
2654 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2655 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2656 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2657 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2658 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2659 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2660 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2661 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2662 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2663 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2664 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2665 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_IA32_ARCH_CAPABILITIES,
2666 true, false);
2667 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
2668
2669 /*
2670 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
2671 * includes the L1D_FLUSH MSR, to mitigate L1TF.
2672 */
2673 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
2674 gmsr[VMX_MSRLIST_STAR].val = 0;
2675 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
2676 gmsr[VMX_MSRLIST_LSTAR].val = 0;
2677 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
2678 gmsr[VMX_MSRLIST_CSTAR].val = 0;
2679 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
2680 gmsr[VMX_MSRLIST_SFMASK].val = 0;
2681 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
2682 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
2683 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
2684 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
2685 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
2686 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
2687 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
2688 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
2689
2690 /* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
2691 vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD|CR0_ET);
2692 vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
2693
2694 /* Force CR4_VMXE to zero. */
2695 vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
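
	/*
	 * How the mask/shadow pairs behave, as a sketch: a bit set in a
	 * CR mask is owned by the host. Guest reads of that bit return
	 * the shadow value, and a guest write trying to give it a value
	 * different from the shadow triggers a CR-access #VMEXIT. Here
	 * CR4_VMXE is masked with the shadow left at 0 (the VMCS was
	 * zeroed), so the guest sees VMXE as clear and cannot set it.
	 */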
2696
2697 /* Set the Host state for resuming. */
2698 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
2699 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
2700 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2701 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2702 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2703 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
2704 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
2705 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
2706 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
2707 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
2708 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)idt);
2709 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
2710 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
2711 vmx_vmwrite(VMCS_HOST_CR0, rcr0());
2712
2713 /* Generate ASID. */
2714 vmx_asid_alloc(vcpu);
2715
2716 	/* Enable Extended Page Tables (EPT), with a 4-level walk. */
2717 eptp =
2718 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
2719 __SHIFTIN(4-1, EPTP_WALKLEN) |
2720 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
2721 mach->vm->vm_map.pmap->pm_pdirpa[0];
2722 vmx_vmwrite(VMCS_EPTP, eptp);
2723
2724 /* Init IA32_MISC_ENABLE. */
2725 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
2726 cpudata->gmsr_misc_enable &=
2727 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
2728 cpudata->gmsr_misc_enable |=
2729 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
2730
2731 /* Init XSAVE header. */
2732 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2733 cpudata->gfpu.xsh_xcomp_bv = 0;
2734
2735 	/* These host MSRs never change at runtime; save them once. */
2736 cpudata->star = rdmsr(MSR_STAR);
2737 cpudata->lstar = rdmsr(MSR_LSTAR);
2738 cpudata->cstar = rdmsr(MSR_CSTAR);
2739 cpudata->sfmask = rdmsr(MSR_SFMASK);
2740
2741 /* Install the RESET state. */
2742 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
2743 sizeof(nvmm_x86_reset_state));
2744 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
2745 vcpu->comm->state_cached = 0;
2746 vmx_vcpu_setstate(vcpu);
2747
2748 vmx_vmcs_leave(vcpu);
2749 }
2750
2751 static int
2752 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2753 {
2754 struct vmx_cpudata *cpudata;
2755 int error;
2756
2757 /* Allocate the VMX cpudata. */
2758 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
2759 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2760 UVM_KMF_WIRED|UVM_KMF_ZERO);
2761 vcpu->cpudata = cpudata;
2762
2763 /* VMCS */
2764 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
2765 VMCS_NPAGES);
2766 if (error)
2767 goto error;
2768
2769 /* MSR Bitmap */
2770 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
2771 MSRBM_NPAGES);
2772 if (error)
2773 goto error;
2774
2775 /* Guest MSR List */
2776 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
2777 if (error)
2778 goto error;
2779
2780 kcpuset_create(&cpudata->htlb_want_flush, true);
2781
2782 /* Init the VCPU info. */
2783 vmx_vcpu_init(mach, vcpu);
2784
2785 return 0;
2786
2787 error:
2788 if (cpudata->vmcs_pa) {
2789 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
2790 VMCS_NPAGES);
2791 }
2792 if (cpudata->msrbm_pa) {
2793 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
2794 MSRBM_NPAGES);
2795 }
2796 if (cpudata->gmsr_pa) {
2797 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2798 }
2799
2800 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2801 return error;
2802 }
2803
2804 static void
2805 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2806 {
2807 struct vmx_cpudata *cpudata = vcpu->cpudata;
2808
2809 vmx_vmcs_enter(vcpu);
2810 vmx_asid_free(vcpu);
2811 vmx_vmcs_destroy(vcpu);
2812
2813 kcpuset_destroy(cpudata->htlb_want_flush);
2814
2815 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
2816 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
2817 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2818 uvm_km_free(kernel_map, (vaddr_t)cpudata,
2819 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2820 }
2821
2822 /* -------------------------------------------------------------------------- */
2823
2824 static int
2825 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
2826 {
2827 struct nvmm_vcpu_conf_cpuid *cpuid = data;
2828 size_t i;
2829
2830 if (__predict_false(cpuid->mask && cpuid->exit)) {
2831 return EINVAL;
2832 }
2833 if (__predict_false(cpuid->mask &&
2834 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
2835 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
2836 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
2837 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
2838 return EINVAL;
2839 }
2840
2841 	/* Neither mask nor exit set: delete the leaf to restore the default. */
2842 if (!cpuid->mask && !cpuid->exit) {
2843 for (i = 0; i < VMX_NCPUIDS; i++) {
2844 if (!cpudata->cpuidpresent[i]) {
2845 continue;
2846 }
2847 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
2848 cpudata->cpuidpresent[i] = false;
2849 }
2850 }
2851 return 0;
2852 }
2853
2854 /* If already here, replace. */
2855 for (i = 0; i < VMX_NCPUIDS; i++) {
2856 if (!cpudata->cpuidpresent[i]) {
2857 continue;
2858 }
2859 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
2860 memcpy(&cpudata->cpuid[i], cpuid,
2861 sizeof(struct nvmm_vcpu_conf_cpuid));
2862 return 0;
2863 }
2864 }
2865
2866 /* Not here, insert. */
2867 for (i = 0; i < VMX_NCPUIDS; i++) {
2868 if (!cpudata->cpuidpresent[i]) {
2869 cpudata->cpuidpresent[i] = true;
2870 memcpy(&cpudata->cpuid[i], cpuid,
2871 sizeof(struct nvmm_vcpu_conf_cpuid));
2872 return 0;
2873 }
2874 }
2875
2876 return ENOBUFS;
2877 }
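
/*
 * Hypothetical userland-side sketch (the entry point is libnvmm's
 * nvmm_vcpu_configure(), with op NVMM_VCPU_CONF_CPUID): to hide the
 * MONITOR/MWAIT bit of CPUID leaf 0x1, one would pass
 *
 *	struct nvmm_vcpu_conf_cpuid cpuid = {
 *		.mask = 1,
 *		.leaf = 0x00000001,
 *		.u.mask.del.ecx = CPUID2_MONITOR,
 *	};
 *
 * which the function above inserts into (or replaces in) the per-VCPU
 * cpuid[] table consulted on each CPUID exit.
 */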
2878
2879 static int
2880 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
2881 {
2882 struct nvmm_vcpu_conf_tpr *tpr = data;
2883
2884 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
2885 return 0;
2886 }
2887
2888 static int
2889 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
2890 {
2891 struct vmx_cpudata *cpudata = vcpu->cpudata;
2892
2893 switch (op) {
2894 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
2895 return vmx_vcpu_configure_cpuid(cpudata, data);
2896 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
2897 return vmx_vcpu_configure_tpr(cpudata, data);
2898 default:
2899 return EINVAL;
2900 }
2901 }
2902
2903 /* -------------------------------------------------------------------------- */
2904
2905 static void
2906 vmx_tlb_flush(struct pmap *pm)
2907 {
2908 struct nvmm_machine *mach = pm->pm_data;
2909 struct vmx_machdata *machdata = mach->machdata;
2910
2911 atomic_inc_64(&machdata->mach_htlb_gen);
2912
2913 /* Generates IPIs, which cause #VMEXITs. */
2914 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_UPDATE);
2915 }
2916
2917 static void
2918 vmx_machine_create(struct nvmm_machine *mach)
2919 {
2920 struct pmap *pmap = mach->vm->vm_map.pmap;
2921 struct vmx_machdata *machdata;
2922
2923 /* Convert to EPT. */
2924 pmap_ept_transform(pmap);
2925
2926 /* Fill in pmap info. */
2927 pmap->pm_data = (void *)mach;
2928 pmap->pm_tlb_flush = vmx_tlb_flush;
2929
2930 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
2931 mach->machdata = machdata;
2932
2933 /* Start with an hTLB flush everywhere. */
2934 machdata->mach_htlb_gen = 1;
2935 }
2936
2937 static void
2938 vmx_machine_destroy(struct nvmm_machine *mach)
2939 {
2940 struct vmx_machdata *machdata = mach->machdata;
2941
2942 kmem_free(machdata, sizeof(struct vmx_machdata));
2943 }
2944
2945 static int
2946 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
2947 {
2948 panic("%s: impossible", __func__);
2949 }
2950
2951 /* -------------------------------------------------------------------------- */
2952
2953 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
2954 ((msrval & __BIT(32 + bitoff)) != 0)
2955 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
2956 ((msrval & __BIT(bitoff)) == 0)
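
/*
 * Capability MSR format assumed by these macros: bits [31:0] are the
 * "allowed 0-settings" (a control bit may be 0 only if its low bit is
 * 0), bits [63:32] the "allowed 1-settings" (a control bit may be 1
 * only if its high bit is 1).
 */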
2957
2958 static int
2959 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
2960 {
2961 uint64_t basic, val, true_val;
2962 bool has_true;
2963 size_t i;
2964
2965 basic = rdmsr(MSR_IA32_VMX_BASIC);
2966 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
2967
2968 val = rdmsr(msr_ctls);
2969 if (has_true) {
2970 true_val = rdmsr(msr_true_ctls);
2971 } else {
2972 true_val = val;
2973 }
2974
2975 for (i = 0; i < 32; i++) {
2976 if (!(set_one & __BIT(i))) {
2977 continue;
2978 }
2979 if (!CTLS_ONE_ALLOWED(true_val, i)) {
2980 return -1;
2981 }
2982 }
2983
2984 return 0;
2985 }
2986
2987 static int
2988 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
2989 uint64_t set_one, uint64_t set_zero, uint64_t *res)
2990 {
2991 uint64_t basic, val, true_val;
2992 bool one_allowed, zero_allowed, has_true;
2993 size_t i;
2994
2995 basic = rdmsr(MSR_IA32_VMX_BASIC);
2996 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
2997
2998 val = rdmsr(msr_ctls);
2999 if (has_true) {
3000 true_val = rdmsr(msr_true_ctls);
3001 } else {
3002 true_val = val;
3003 }
3004
3005 for (i = 0; i < 32; i++) {
3006 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3007 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3008
3009 if (zero_allowed && !one_allowed) {
3010 if (set_one & __BIT(i))
3011 return -1;
3012 *res &= ~__BIT(i);
3013 } else if (one_allowed && !zero_allowed) {
3014 if (set_zero & __BIT(i))
3015 return -1;
3016 *res |= __BIT(i);
3017 } else {
3018 if (set_zero & __BIT(i)) {
3019 *res &= ~__BIT(i);
3020 } else if (set_one & __BIT(i)) {
3021 *res |= __BIT(i);
3022 } else if (!has_true) {
3023 *res &= ~__BIT(i);
3024 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3025 *res &= ~__BIT(i);
3026 } else if (CTLS_ONE_ALLOWED(val, i)) {
3027 *res |= __BIT(i);
3028 } else {
3029 return -1;
3030 }
3031 }
3032 }
3033
3034 return 0;
3035 }
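
/*
 * Worked example for vmx_init_ctls(), under an assumed capability value:
 * suppose the TRUE MSR has, for control bit 7, low bit 7 = 1 and high
 * bit 7 = 1. Zero is then disallowed and one allowed, so bit 7 is forced
 * to 1 in *res, and requesting it in set_zero fails with -1. When both
 * settings are allowed, the caller's set_zero/set_one preference wins;
 * with no preference, the non-TRUE MSR's default decides, and without a
 * TRUE MSR the bit is simply cleared.
 */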
3036
3037 static bool
3038 vmx_ident(void)
3039 {
3040 uint64_t msr;
3041 int ret;
3042
3043 if (!(cpu_feature[1] & CPUID2_VMX)) {
3044 return false;
3045 }
3046
3047 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3048 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3049 printf("NVMM: VMX disabled in BIOS\n");
3050 return false;
3051 }
3052 if ((msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3053 printf("NVMM: VMX disabled in BIOS\n");
3054 return false;
3055 }
3056
3057 msr = rdmsr(MSR_IA32_VMX_BASIC);
3058 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3059 printf("NVMM: I/O reporting not supported\n");
3060 return false;
3061 }
3062 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3063 printf("NVMM: WB memory not supported\n");
3064 return false;
3065 }
3066
3067 	/* The fixed MSRs demand PG/PE even with Unrestricted Guest; relax them. */
3068 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3069 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3070 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3071 if (ret == -1) {
3072 printf("NVMM: CR0 requirements not satisfied\n");
3073 return false;
3074 }
3075
3076 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3077 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3078 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
3079 if (ret == -1) {
3080 printf("NVMM: CR4 requirements not satisfied\n");
3081 return false;
3082 }
3083
3084 /* Init the CTLSs right now, and check for errors. */
3085 ret = vmx_init_ctls(
3086 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3087 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3088 &vmx_pinbased_ctls);
3089 if (ret == -1) {
3090 printf("NVMM: pin-based-ctls requirements not satisfied\n");
3091 return false;
3092 }
3093 ret = vmx_init_ctls(
3094 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3095 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3096 &vmx_procbased_ctls);
3097 if (ret == -1) {
3098 printf("NVMM: proc-based-ctls requirements not satisfied\n");
3099 return false;
3100 }
3101 ret = vmx_init_ctls(
3102 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3103 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3104 &vmx_procbased_ctls2);
3105 if (ret == -1) {
3106 printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
3107 return false;
3108 }
3109 ret = vmx_check_ctls(
3110 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3111 PROC_CTLS2_INVPCID_ENABLE);
3112 if (ret != -1) {
3113 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3114 }
3115 ret = vmx_init_ctls(
3116 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3117 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3118 &vmx_entry_ctls);
3119 if (ret == -1) {
3120 printf("NVMM: entry-ctls requirements not satisfied\n");
3121 return false;
3122 }
3123 ret = vmx_init_ctls(
3124 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3125 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3126 &vmx_exit_ctls);
3127 if (ret == -1) {
3128 printf("NVMM: exit-ctls requirements not satisfied\n");
3129 return false;
3130 }
3131
3132 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3133 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3134 printf("NVMM: 4-level page tree not supported\n");
3135 return false;
3136 }
3137 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3138 printf("NVMM: INVEPT not supported\n");
3139 return false;
3140 }
3141 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3142 printf("NVMM: INVVPID not supported\n");
3143 return false;
3144 }
3145 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3146 pmap_ept_has_ad = true;
3147 } else {
3148 pmap_ept_has_ad = false;
3149 }
3150 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3151 printf("NVMM: EPT UC/WB memory types not supported\n");
3152 return false;
3153 }
3154
3155 return true;
3156 }
3157
3158 static void
3159 vmx_init_asid(uint32_t maxasid)
3160 {
3161 size_t allocsz;
3162
3163 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
3164
3165 vmx_maxasid = maxasid;
3166 allocsz = roundup(maxasid, 8) / 8;
3167 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
3168
3169 /* ASID 0 is reserved for the host. */
3170 vmx_asidmap[0] |= __BIT(0);
3171 }
3172
3173 static void
3174 vmx_change_cpu(void *arg1, void *arg2)
3175 {
3176 struct cpu_info *ci = curcpu();
3177 bool enable = (bool)arg1;
3178 uint64_t cr4;
3179
3180 if (!enable) {
3181 vmx_vmxoff();
3182 }
3183
3184 cr4 = rcr4();
3185 if (enable) {
3186 cr4 |= CR4_VMXE;
3187 } else {
3188 cr4 &= ~CR4_VMXE;
3189 }
3190 lcr4(cr4);
3191
3192 if (enable) {
3193 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
3194 }
3195 }
3196
3197 static void
3198 vmx_init_l1tf(void)
3199 {
3200 u_int descs[4];
3201 uint64_t msr;
3202
3203 if (cpuid_level < 7) {
3204 return;
3205 }
3206
3207 x86_cpuid(7, descs);
3208
3209 if (descs[3] & CPUID_SEF_ARCH_CAP) {
3210 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3211 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3212 /* No mitigation needed. */
3213 return;
3214 }
3215 }
3216
3217 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
3218 /* Enable hardware mitigation. */
3219 vmx_msrlist_entry_nmsr += 1;
3220 }
3221 }
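
/*
 * Decision summary, as implemented above: if IA32_ARCH_CAPABILITIES
 * reports SKIP_L1DFL_VMENTRY, the core does not leak through the L1D on
 * VM entry and nothing is done. Otherwise, if CPUID.7:EDX advertises
 * L1D_FLUSH, growing vmx_msrlist_entry_nmsr enables the last guest-MSR
 * slot (set up in vmx_vcpu_init()), so every VMENTRY writes
 * IA32_FLUSH_CMD_L1D_FLUSH to MSR_IA32_FLUSH_CMD. No software fallback
 * is attempted.
 */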
3222
3223 static void
3224 vmx_init(void)
3225 {
3226 CPU_INFO_ITERATOR cii;
3227 struct cpu_info *ci;
3228 uint64_t xc, msr;
3229 struct vmxon *vmxon;
3230 uint32_t revision;
3231 paddr_t pa;
3232 vaddr_t va;
3233 int error;
3234
3235 /* Init the ASID bitmap (VPID). */
3236 vmx_init_asid(VPID_MAX);
3237
3238 /* Init the XCR0 mask. */
3239 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3240
3241 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3242 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3243 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3244 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3245 } else {
3246 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3247 }
3248 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3249 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3250 } else {
3251 vmx_ept_flush_op = VMX_INVEPT_ALL;
3252 }
3253 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3254 vmx_eptp_type = EPTP_TYPE_WB;
3255 } else {
3256 vmx_eptp_type = EPTP_TYPE_UC;
3257 }
3258
3259 /* Init the L1TF mitigation. */
3260 vmx_init_l1tf();
3261
3262 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3263 revision = vmx_get_revision();
3264
3265 for (CPU_INFO_FOREACH(cii, ci)) {
3266 error = vmx_memalloc(&pa, &va, 1);
3267 if (error) {
3268 panic("%s: out of memory", __func__);
3269 }
3270 vmxoncpu[cpu_index(ci)].pa = pa;
3271 vmxoncpu[cpu_index(ci)].va = va;
3272
3273 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
3274 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3275 }
3276
3277 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
3278 xc_wait(xc);
3279 }
3280
3281 static void
3282 vmx_fini_asid(void)
3283 {
3284 size_t allocsz;
3285
3286 allocsz = roundup(vmx_maxasid, 8) / 8;
3287 kmem_free(vmx_asidmap, allocsz);
3288
3289 mutex_destroy(&vmx_asidlock);
3290 }
3291
3292 static void
3293 vmx_fini(void)
3294 {
3295 uint64_t xc;
3296 size_t i;
3297
3298 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
3299 xc_wait(xc);
3300
3301 for (i = 0; i < MAXCPUS; i++) {
3302 if (vmxoncpu[i].pa != 0)
3303 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3304 }
3305
3306 vmx_fini_asid();
3307 }
3308
3309 static void
3310 vmx_capability(struct nvmm_capability *cap)
3311 {
3312 cap->arch.mach_conf_support = 0;
3313 cap->arch.vcpu_conf_support =
3314 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3315 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3316 cap->arch.xcr0_mask = vmx_xcr0_mask;
3317 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3318 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3319 }
3320
3321 const struct nvmm_impl nvmm_x86_vmx = {
3322 .ident = vmx_ident,
3323 .init = vmx_init,
3324 .fini = vmx_fini,
3325 .capability = vmx_capability,
3326 .mach_conf_max = NVMM_X86_MACH_NCONF,
3327 .mach_conf_sizes = NULL,
3328 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3329 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3330 .state_size = sizeof(struct nvmm_x64_state),
3331 .machine_create = vmx_machine_create,
3332 .machine_destroy = vmx_machine_destroy,
3333 .machine_configure = vmx_machine_configure,
3334 .vcpu_create = vmx_vcpu_create,
3335 .vcpu_destroy = vmx_vcpu_destroy,
3336 .vcpu_configure = vmx_vcpu_configure,
3337 .vcpu_setstate = vmx_vcpu_setstate,
3338 .vcpu_getstate = vmx_vcpu_getstate,
3339 .vcpu_inject = vmx_vcpu_inject,
3340 .vcpu_run = vmx_vcpu_run
3341 };
3342