/* $NetBSD: nvmm_x86_vmx.c,v 1.2 2019/02/14 09:37:31 maxv Exp $ */

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.2 2019/02/14 09:37:31 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int _vmx_invept(uint64_t op, void *desc);
int _vmx_invvpid(uint64_t op, void *desc);
int _vmx_vmread(uint64_t op, uint64_t *val);
int _vmx_vmwrite(uint64_t op, uint64_t val);
int _vmx_vmptrld(paddr_t *pa);
int _vmx_vmptrst(paddr_t *pa);
int _vmx_vmclear(paddr_t *pa);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

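/*
 * The VMX instructions report failure through RFLAGS rather than through
 * a fault; the _vmx_* stubs convert that into a nonzero return value. A
 * failure in any of these wrappers means the hypervisor itself has
 * misprogrammed VMX, so the only sensible reaction is to panic.
 */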
#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}
#define vmx_invept(a, b) \
	if (__predict_false(_vmx_invept(a, b) != 0)) { \
		panic("%s: INVEPT failed", __func__); \
	}
#define vmx_invvpid(a, b) \
	if (__predict_false(_vmx_invvpid(a, b) != 0)) { \
		panic("%s: INVVPID failed", __func__); \
	}
#define vmx_vmread(a, b) \
	if (__predict_false(_vmx_vmread(a, b) != 0)) { \
		panic("%s: VMREAD failed", __func__); \
	}
#define vmx_vmwrite(a, b) \
	if (__predict_false(_vmx_vmwrite(a, b) != 0)) { \
		panic("%s: VMWRITE failed", __func__); \
	}
#define vmx_vmptrld(a) \
	if (__predict_false(_vmx_vmptrld(a) != 0)) { \
		panic("%s: VMPTRLD failed", __func__); \
	}
#define vmx_vmptrst(a) \
	if (__predict_false(_vmx_vmptrst(a) != 0)) { \
		panic("%s: VMPTRST failed", __func__); \
	}
#define vmx_vmclear(a) \
	if (__predict_false(_vmx_vmclear(a) != 0)) { \
		panic("%s: VMCLEAR failed", __func__); \
	}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC		0x0480
#define IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define IA32_VMX_BASIC_DUAL		__BIT(49)
#define IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define MEM_TYPE_UC			0
#define MEM_TYPE_WB			6
#define IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS		0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
#define MSR_IA32_VMX_EXIT_CTLS			0x0483
#define MSR_IA32_VMX_ENTRY_CTLS			0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0			0x0486
#define MSR_IA32_VMX_CR0_FIXED1			0x0487
#define MSR_IA32_VMX_CR4_FIXED0			0x0488
#define MSR_IA32_VMX_CR4_FIXED1			0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define IA32_VMX_EPT_VPID_UC			__BIT(8)
#define IA32_VMX_EPT_VPID_WB			__BIT(14)
#define IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

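/*
 * VMCS field encodings, per the Intel SDM: bit 0 is the access type,
 * bits 9:1 the index, bits 11:10 the field type (control, read-only
 * data, guest state, host state), and bits 14:13 the field width
 * (16-bit, 64-bit, 32-bit, natural).
 */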
/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define EPTP_TYPE			__BITS(2,0)
#define EPTP_TYPE_UC			0
#define EPTP_TYPE_WB			6
#define EPTP_WALKLEN			__BITS(5,3)
#define EPTP_FLAGS_AD			__BIT(6)
#define EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_TSC_MULTIPLIER			0x00002032
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define PIN_CTLS_INT_EXITING		__BIT(0)
#define PIN_CTLS_NMI_EXITING		__BIT(3)
#define PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define PROC_CTLS_HLT_EXITING		__BIT(7)
#define PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define PROC_CTLS_RCR3_EXITING		__BIT(15)
#define PROC_CTLS_LCR3_EXITING		__BIT(16)
#define PROC_CTLS_RCR8_EXITING		__BIT(19)
#define PROC_CTLS_LCR8_EXITING		__BIT(20)
#define PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define PROC_CTLS_DR_EXITING		__BIT(23)
#define PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define EXIT_CTLS_SAVE_PAT		__BIT(18)
#define EXIT_CTLS_LOAD_PAT		__BIT(19)
#define EXIT_CTLS_SAVE_EFER		__BIT(20)
#define EXIT_CTLS_LOAD_EFER		__BIT(21)
#define EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define ENTRY_CTLS_LONG_MODE		__BIT(9)
#define ENTRY_CTLS_SMM			__BIT(10)
#define ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define INTR_INFO_VECTOR		__BITS(7,0)
#define INTR_INFO_TYPE_EXT_INT		(0 << 8)
#define INTR_INFO_TYPE_NMI		(2 << 8)
#define INTR_INFO_TYPE_HW_EXC		(3 << 8)
#define INTR_INFO_TYPE_SW_INT		(4 << 8)
#define INTR_INFO_TYPE_PRIV_SW_EXC	(5 << 8)
#define INTR_INFO_TYPE_SW_EXC		(6 << 8)
#define INTR_INFO_TYPE_OTHER		(7 << 8)
#define INTR_INFO_ERROR			__BIT(11)
#define INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INST_LENGTH			0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define PROC_CTLS2_PML_ENABLE		__BIT(17)
#define PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define INT_STATE_STI			__BIT(0)
#define INT_STATE_MOVSS			__BIT(1)
#define INT_STATE_SMI			__BIT(2)
#define INT_STATE_NMI			__BIT(3)
#define INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64

/* -------------------------------------------------------------------------- */

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)
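/*
 * Bits of EFER/CR0/CR4 that influence how the MMU caches translations;
 * a guest write that changes any of them is expected to be followed by
 * a guest TLB flush.
 */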

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_x86_conf_cpuid cpuid[VMX_NCPUIDS];
	kcpuset_t *ept_want_flush;
};

static const size_t vmx_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_X86_CONF_CPUID] = sizeof(struct nvmm_x86_conf_cpuid)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool tlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(64);

	/* Event state */
	bool int_window_exit;
	bool nmi_window_exit;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t tsc_offset;
	struct xsave_header gfpu __aligned(64);
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

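/*
 * vmx_vmcs_enter/vmx_vmcs_leave load and unload the VCPU's VMCS on the
 * current host CPU, with a refcount so that the pair can nest. Only the
 * outermost enter performs VMPTRLD and disables preemption (the
 * current-VMCS pointer is a per-CPU resource); only the outermost leave
 * performs VMCLEAR.
 */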
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	paddr_t oldpa __diagused;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
#ifdef DIAGNOSTIC
		KASSERT(kpreempt_disabled());
		vmx_vmptrst(&oldpa);
		KASSERT(oldpa == cpudata->vmcs_pa);
#endif
		return;
	}

	kpreempt_disable();

#ifdef DIAGNOSTIC
	vmx_vmptrst(&oldpa);
	KASSERT(oldpa == 0xFFFFFFFFFFFFFFFF);
#endif

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	paddr_t oldpa __diagused;

	KASSERT(kpreempt_disabled());
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
#ifdef DIAGNOSTIC
		vmx_vmptrst(&oldpa);
		KASSERT(oldpa == cpudata->vmcs_pa);
#endif
		return;
	}

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

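/*
 * Exceptions that push an error code, per the x86 exception model:
 * #DF, #TS, #NP, #SS, #GP, #PF, #AC and #SX.
 */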
static inline int
vmx_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_event *event)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = 0;
	uint64_t info, intstate, rflags;

	if (event->vector >= 256) {
		return EINVAL;
	}

	vmx_vmcs_enter(vcpu);

	switch (event->type) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = INTR_INFO_TYPE_EXT_INT;
		if (event->vector == 2) {
			type = INTR_INFO_TYPE_NMI;
		}
		vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
		if (type == INTR_INFO_TYPE_NMI) {
			if (cpudata->nmi_window_exit) {
				ret = EAGAIN;
				goto out;
			}
			vmx_event_waitexit_enable(vcpu, true);
		} else {
			vmx_vmread(VMCS_GUEST_RFLAGS, &rflags);
			if ((rflags & PSL_I) == 0 ||
			    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0) {
				vmx_event_waitexit_enable(vcpu, false);
				ret = EAGAIN;
				goto out;
			}
		}
		err = 0;
		break;
	case NVMM_EVENT_INTERRUPT_SW:
		ret = EINVAL;
		goto out;
	case NVMM_EVENT_EXCEPTION:
		if (event->vector == 2 || event->vector >= 32) {
			ret = EINVAL;
			goto out;
		}
		if (event->vector == 3 || event->vector == 0) {
			ret = EINVAL;
			goto out;
		}
		type = INTR_INFO_TYPE_HW_EXC;
		err = vmx_event_has_error(event->vector);
		break;
	default:
		ret = EAGAIN;
		goto out;
	}

	info =
	    __SHIFTIN(event->vector, INTR_INFO_VECTOR) |
	    type |
	    __SHIFTIN(err, INTR_INFO_ERROR) |
	    __SHIFTIN(1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, event->u.error);

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 6;
	event.u.error = 0;

	ret = vmx_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 13;
	event.u.error = 0;

	ret = vmx_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
	vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

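/*
 * Patch the results of the CPUID leaves the guest must not see verbatim:
 * the local APIC ID becomes the VCPU number, and the features NVMM does
 * not virtualize (VMX, SMX, PCID, RDTSCP, the power/thermal leaves, ...)
 * are hidden.
 */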
static void
vmx_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	switch (eax) {
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);
		cpudata->gprs[NVMM_X64_GPR_RCX] &=
		    ~(CPUID2_VMX|CPUID2_SMX|CPUID2_EST|CPUID2_TM2|CPUID2_PDCM|
		      CPUID2_PCID|CPUID2_DEADLINE);
		cpudata->gprs[NVMM_X64_GPR_RDX] &=
		    ~(CPUID_DS|CPUID_ACPI|CPUID_TM);
		break;
	case 0x00000005:
	case 0x00000006:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007:
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_SEF_INVPCID;
		cpudata->gprs[NVMM_X64_GPR_RDX] &=
		    ~(CPUID_SEF_IBRS|CPUID_SEF_STIBP|CPUID_SEF_L1D_FLUSH|
		      CPUID_SEF_SSBD);
		break;
	case 0x0000000D:
		if (ecx != 0 || vmx_xcr0_mask == 0) {
			break;
		}
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
		if (cpudata->gxcr0 & XCR0_SSE) {
			cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
		} else {
			cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
		}
		cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
		cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave);
		cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
		break;
	case 0x40000000:
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~CPUID_RDTSCP;
		break;
	default:
		break;
	}
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_machdata *machdata = mach->machdata;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x86_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	for (i = 0; i < VMX_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	/* Overwrite non-tunable leaves. */
	vmx_inkernel_handle_cpuid(vcpu, eax, ecx);

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		vmx_vmread(VMCS_GUEST_RFLAGS, &rflags);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define CR_TYPE_WRITE		0
#define CR_TYPE_READ		1
#define CR_TYPE_CLTS		2
#define CR_TYPE_LMSW		3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr0;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		vmx_vmread(VMCS_GUEST_RSP, &gpr);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr0 = gpr | CR0_NE | CR0_ET;
	cr0 &= ~(CR0_NW|CR0_CD);

	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	vmx_vmwrite(VMCS_GUEST_CR0, cr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		vmx_vmread(VMCS_GUEST_RSP, &gpr);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr4 = gpr | CR4_VMXE;

	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmread(VMCS_GUEST_RSP, &cpudata->gcr8);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual;
	int ret;

	vmx_vmread(VMCS_EXIT_QUALIFICATION, &qual);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(mach, vcpu);
	}

	exit->reason = NVMM_EXIT_NONE;
}

#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define IO_SIZE_8		0
#define IO_SIZE_16		1
#define IO_SIZE_32		3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define IO_ADRSIZE_16		0
#define IO_ADRSIZE_32		1
#define IO_ADRSIZE_64		2
#define VMX_INFO_IO_SEG		__BITS(17,15)

static const int seg_to_nvmm[] = {
	[0] = NVMM_X64_SEG_ES,
	[1] = NVMM_X64_SEG_CS,
	[2] = NVMM_X64_SEG_SS,
	[3] = NVMM_X64_SEG_DS,
	[4] = NVMM_X64_SEG_FS,
	[5] = NVMM_X64_SEG_GS
};

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	vmx_vmread(VMCS_EXIT_QUALIFICATION, &qual);
	vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO, &info);

	exit->reason = NVMM_EXIT_IO;

	if (qual & VMX_QUAL_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = seg_to_nvmm[__SHIFTOUT(info, VMX_INFO_IO_SEG)];

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if ((exit->u.io.type == NVMM_EXIT_IO_IN) && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	exit->u.io.npc = rip + inslen;
}

static const uint64_t msr_ignore_list[] = {
	MSR_BIOS_SIGN,
	MSR_IA32_PLATFORM_ID
};

static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;
	size_t i;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_CR_PAT) {
			vmx_vmread(VMCS_GUEST_IA32_PAT, &val);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			val = 0;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_CR_PAT) {
			vmx_vmwrite(VMCS_GUEST_IA32_PAT, exit->u.msr.val);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			goto handled;
		}
		break;
	}

	return false;

handled:
	vmx_inkernel_advance();
	return true;
}

static void
vmx_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit, bool rdmsr)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t inslen, rip;

	if (rdmsr) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (rdmsr) {
		exit->u.msr.val = 0;
	} else {
		uint64_t rdx, rax;
		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->gprs[NVMM_X64_GPR_RAX];
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	}

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	exit->u.msr.npc = rip + inslen;
}

static void
vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;
	uint64_t ss;

	exit->reason = NVMM_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);

	vmx_vmread(VMCS_GUEST_SS_SELECTOR, &ss);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false((ss & SEL_UPL) != 0)) {
		goto error;
	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;

	vmx_inkernel_advance();
	return;

error:
	vmx_inject_gp(mach, vcpu);
}

#define VMX_EPT_VIOLATION_READ		__BIT(0)
#define VMX_EPT_VIOLATION_WRITE		__BIT(1)
#define VMX_EPT_VIOLATION_EXECUTE	__BIT(2)

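/*
 * EPT violation. First let UVM try to service the fault, which covers
 * the common case of a guest page that simply hasn't been entered into
 * the EPT yet. Only a fault UVM cannot resolve is forwarded to userland
 * as NVMM_EXIT_MEMORY.
 */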
static void
vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t perm;
	gpaddr_t gpa;
	int error;

	vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &gpa);

	error = uvm_fault(&mach->vm->vm_map, gpa, VM_PROT_ALL);

	if (error) {
		exit->reason = NVMM_EXIT_MEMORY;
		vmx_vmread(VMCS_EXIT_QUALIFICATION, &perm);
		if (perm & VMX_EPT_VIOLATION_WRITE)
			exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
		else if (perm & VMX_EPT_VIOLATION_EXECUTE)
			exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
		else
			exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
		exit->u.mem.gpa = gpa;
		exit->u.mem.inst_len = 0;
	} else {
		exit->reason = NVMM_EXIT_NONE;
	}
}

static void
vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	cpudata->ts_set = (rcr0() & CR0_TS) != 0;

	fpu_area_save(&cpudata->hfpu, vmx_xcr0_mask);
	fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);

	if (vmx_xcr0_mask != 0) {
		cpudata->hxcr0 = rdxcr(0);
		wrxcr(0, cpudata->gxcr0);
	}
}

static void
vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	if (vmx_xcr0_mask != 0) {
		cpudata->gxcr0 = rdxcr(0);
		wrxcr(0, cpudata->hxcr0);
	}

	fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
	fpu_area_restore(&cpudata->hfpu, vmx_xcr0_mask);

	if (cpudata->ts_set) {
		stts();
	}
}

static void
vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	x86_dbregs_save(curlwp);

	ldr7(0);

	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
	ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
}

static void
vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
	cpudata->drs[NVMM_X64_DR_DR6] = rdr6();

	x86_dbregs_restore(curlwp);
}

static void
vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	/* This gets restored automatically by the CPU. */
	vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
	vmx_vmwrite(VMCS_HOST_CR3, rcr3());
	vmx_vmwrite(VMCS_HOST_CR4, rcr4());

	/* Note: MSR_LSTAR is not static, because of SVS. */
	cpudata->lstar = rdmsr(MSR_LSTAR);
	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	wrmsr(MSR_STAR, cpudata->star);
	wrmsr(MSR_LSTAR, cpudata->lstar);
	wrmsr(MSR_CSTAR, cpudata->cstar);
	wrmsr(MSR_SFMASK, cpudata->sfmask);
	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
}

#define VMX_INVVPID_ADDRESS		0
#define VMX_INVVPID_CONTEXT		1
#define VMX_INVVPID_ALL			2
#define VMX_INVVPID_CONTEXT_NOGLOBAL	3

#define VMX_INVEPT_CONTEXT		1
#define VMX_INVEPT_ALL			2

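/*
 * The VCPU run loop. Before entering the guest we flush the EPT if
 * another VCPU invalidated it on this host CPU, and flush the
 * VPID-tagged TLB entries if the VCPU migrated here or a flush was
 * requested. The loop keeps re-entering the guest until an exit needs
 * userland's attention or the host wants the CPU back.
 */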
static int
vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_machdata *machdata = mach->machdata;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	bool tlb_need_flush = false;
	struct vpid_desc vpid_desc;
	struct ept_desc ept_desc;
	struct cpu_info *ci;
	uint64_t exitcode;
	uint64_t intstate;
	int hcpu, s, ret;
	bool launched = false;

	vmx_vmcs_enter(vcpu);
	ci = curcpu();
	hcpu = cpu_number();

	if (__predict_false(kcpuset_isset(machdata->ept_want_flush, hcpu))) {
		vmx_vmread(VMCS_EPTP, &ept_desc.eptp);
		ept_desc.mbz = 0;
		vmx_invept(vmx_ept_flush_op, &ept_desc);
		kcpuset_clear(machdata->ept_want_flush, hcpu);
	}

	if (vcpu->hcpu_last != hcpu) {
		tlb_need_flush = true;
	}

	if (vcpu->hcpu_last != hcpu) {
		vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
		vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
		vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
		vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
		vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->tsc_offset +
		    curcpu()->ci_data.cpu_cc_skew);
		vcpu->hcpu_last = hcpu;
	}

	vmx_vcpu_guest_dbregs_enter(vcpu);
	vmx_vcpu_guest_misc_enter(vcpu);

	while (1) {
		if (cpudata->tlb_want_flush || tlb_need_flush) {
			vpid_desc.vpid = cpudata->asid;
			vpid_desc.addr = 0;
			vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
			cpudata->tlb_want_flush = false;
			tlb_need_flush = false;
		}

		s = splhigh();
		vmx_vcpu_guest_fpu_enter(vcpu);
		lcr2(cpudata->gcr2);
		if (launched) {
			ret = vmx_vmresume(cpudata->gprs);
		} else {
			ret = vmx_vmlaunch(cpudata->gprs);
		}
		cpudata->gcr2 = rcr2();
		vmx_vcpu_guest_fpu_leave(vcpu);
		splx(s);

		if (__predict_false(ret != 0)) {
			exit->reason = NVMM_EXIT_INVALID;
			break;
		}

		launched = true;

		vmx_vmread(VMCS_EXIT_REASON, &exitcode);
		exitcode &= __BITS(15,0);

		switch (exitcode) {
		case VMCS_EXITCODE_EXT_INT:
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCS_EXITCODE_CPUID:
			vmx_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_HLT:
			vmx_exit_hlt(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_CR:
			vmx_exit_cr(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_IO:
			vmx_exit_io(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_RDMSR:
			vmx_exit_msr(mach, vcpu, exit, true);
			break;
		case VMCS_EXITCODE_WRMSR:
			vmx_exit_msr(mach, vcpu, exit, false);
			break;
		case VMCS_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_EXIT_SHUTDOWN;
			break;
		case VMCS_EXITCODE_MONITOR:
			exit->reason = NVMM_EXIT_MONITOR;
			break;
		case VMCS_EXITCODE_MWAIT:
			exit->reason = NVMM_EXIT_MWAIT;
			break;
		case VMCS_EXITCODE_XSETBV:
			vmx_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_RDPMC:
		case VMCS_EXITCODE_RDTSCP:
		case VMCS_EXITCODE_INVVPID:
		case VMCS_EXITCODE_INVEPT:
		case VMCS_EXITCODE_VMCALL:
		case VMCS_EXITCODE_VMCLEAR:
		case VMCS_EXITCODE_VMLAUNCH:
		case VMCS_EXITCODE_VMPTRLD:
		case VMCS_EXITCODE_VMPTRST:
		case VMCS_EXITCODE_VMREAD:
		case VMCS_EXITCODE_VMRESUME:
		case VMCS_EXITCODE_VMWRITE:
		case VMCS_EXITCODE_VMXOFF:
		case VMCS_EXITCODE_VMXON:
			vmx_inject_ud(mach, vcpu);
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCS_EXITCODE_EPT_VIOLATION:
			vmx_exit_epf(mach, vcpu, exit);
			break;
		case VMCS_EXITCODE_INT_WINDOW:
			vmx_event_waitexit_disable(vcpu, false);
			exit->reason = NVMM_EXIT_INT_READY;
			break;
		case VMCS_EXITCODE_NMI_WINDOW:
			vmx_event_waitexit_disable(vcpu, true);
			exit->reason = NVMM_EXIT_NMI_READY;
			break;
		default:
			exit->reason = NVMM_EXIT_INVALID;
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			break;
		}
		if (curcpu()->ci_data.cpu_softints != 0) {
			break;
		}
		if (curlwp->l_flag & LW_USERRET) {
			break;
		}
		if (exit->reason != NVMM_EXIT_NONE) {
			break;
		}
	}

	vmx_vcpu_guest_misc_leave(vcpu);
	vmx_vcpu_guest_dbregs_leave(vcpu);

	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = cpudata->gcr8;
	vmx_vmread(VMCS_GUEST_RFLAGS,
	    &exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS]);
	vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
	    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
	    cpudata->int_window_exit;
	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
	    cpudata->nmi_window_exit;

	vmx_vmcs_leave(vcpu);

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = TAILQ_FIRST(&pglist)->phys_addr;
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

static void
vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */

static void
vmx_asid_alloc(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	size_t i, oct, bit;

	mutex_enter(&vmx_asidlock);

	for (i = 0; i < vmx_maxasid; i++) {
		oct = i / 8;
		bit = i % 8;

		if (vmx_asidmap[oct] & __BIT(bit)) {
			continue;
		}

		cpudata->asid = i;

		vmx_asidmap[oct] |= __BIT(bit);
		vmx_vmwrite(VMCS_VPID, i);
		mutex_exit(&vmx_asidlock);
		return;
	}

	mutex_exit(&vmx_asidlock);

	panic("%s: impossible", __func__);
}

static void
vmx_asid_free(struct nvmm_cpu *vcpu)
{
	size_t oct, bit;
	uint64_t asid;

	vmx_vmread(VMCS_VPID, &asid);

	oct = asid / 8;
	bit = asid % 8;

	mutex_enter(&vmx_asidlock);
	vmx_asidmap[oct] &= ~__BIT(bit);
	mutex_exit(&vmx_asidlock);
}

static void
vmx_init_asid(uint32_t maxasid)
{
	size_t allocsz;

	mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);

	vmx_maxasid = maxasid;
	allocsz = roundup(maxasid, 8) / 8;
	vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);

	/* ASID 0 is reserved for the host. */
	vmx_asidmap[0] |= __BIT(0);
}

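/*
 * The 4KB MSR bitmap consists of four 1KB chunks: the read bitmap for
 * MSRs 0x00000000-0x00001FFF, the read bitmap for MSRs
 * 0xC0000000-0xC0001FFF, then the two corresponding write bitmaps. A
 * clear bit means the guest can access the MSR directly, without
 * triggering a VMEXIT.
 */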
1882 static void
1883 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
1884 {
1885 uint64_t byte;
1886 uint8_t bitoff;
1887
1888 if (msr < 0x00002000) {
1889 /* Range 1 */
1890 byte = ((msr - 0x00000000) / 8) + 0;
1891 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
1892 /* Range 2 */
1893 byte = ((msr - 0xC0000000) / 8) + 1024;
1894 } else {
1895 panic("%s: wrong range", __func__);
1896 }
1897
1898 bitoff = (msr & 0x7);
1899
1900 if (read) {
1901 bitmap[byte] &= ~__BIT(bitoff);
1902 }
1903 if (write) {
1904 bitmap[2048 + byte] &= ~__BIT(bitoff);
1905 }
1906 }
1907
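/*
 * vmx_vcpu_init: fill in the freshly allocated VMCS of a VCPU. This
 * installs the control fields computed in vmx_ident, the MSR bitmap
 * and autoload lists, the CR0/CR4 masks and shadows, the host state
 * restored on VMEXIT, the VPID and the EPTP.
 */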
1908 static void
1909 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
1910 {
1911 struct vmx_cpudata *cpudata = vcpu->cpudata;
1912 struct vmcs *vmcs = cpudata->vmcs;
1913 struct msr_entry *gmsr = cpudata->gmsr;
1914 extern uint8_t vmx_resume_rip;
1915 uint64_t rev, eptp;
1916
1917 rev = vmx_get_revision();
1918
1919 memset(vmcs, 0, VMCS_SIZE);
1920 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
1921 vmcs->abort = 0;
1922
1923 vmx_vmcs_enter(vcpu);
1924
1925 /* No link pointer. */
1926 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
1927
1928 /* Install the CTLSs. */
1929 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
1930 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
1931 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
1932 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
1933 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
1934
1935 /* Allow direct access to certain MSRs. */
1936 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
1937 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
1938 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
1939 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
1940 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
1941 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
1942 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
1943 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
1944 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
1945 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
1946 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
1947 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
1948 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
1949 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_IA32_ARCH_CAPABILITIES,
1950 true, false);
1951 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
1952
1953 /*
1954 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
1955 * includes the L1D_FLUSH MSR, to mitigate L1TF.
1956 */
1957 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
1958 gmsr[VMX_MSRLIST_STAR].val = 0;
1959 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
1960 gmsr[VMX_MSRLIST_LSTAR].val = 0;
1961 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
1962 gmsr[VMX_MSRLIST_CSTAR].val = 0;
1963 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
1964 gmsr[VMX_MSRLIST_SFMASK].val = 0;
1965 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
1966 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
1967 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
1968 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
1969 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
1970 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
1971 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
1972 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
1973
1974 /* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
1975 vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD);
1976 vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
1977
1978 /* Force CR4_VMXE to zero. */
1979 vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
1980
1981 /* Set the Host state for resuming. */
1982 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
1983 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
1984 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
1985 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
1986 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
1987 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
1988 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
1989 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
1990 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
1991 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
1992 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)idt);
1993 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
1994 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
1995 vmx_vmwrite(VMCS_HOST_CR0, rcr0());
1996
1997 /* Generate ASID. */
1998 vmx_asid_alloc(vcpu);
1999
2000 /* Enable EPT (Extended Page Tables), with a 4-level page walk. */
2001 eptp =
2002 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
2003 __SHIFTIN(4-1, EPTP_WALKLEN) |
2004 EPTP_FLAGS_AD |
2005 mach->vm->vm_map.pmap->pm_pdirpa[0];
2006 vmx_vmwrite(VMCS_EPTP, eptp);
2007
2008 /* Must always be set. */
2009 vmx_vmwrite(VMCS_GUEST_CR4, CR4_VMXE);
2010 vmx_vmwrite(VMCS_GUEST_CR0, CR0_NE);
2011 cpudata->gxcr0 = XCR0_X87;
2012
2013 /* Init XSAVE header. */
2014 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2015 cpudata->gfpu.xsh_xcomp_bv = 0;
2016
2017 /* Bluntly hide the host TSC. */
2018 cpudata->tsc_offset = rdtsc();
2019
2020 /* These MSRs are static. */
2021 cpudata->star = rdmsr(MSR_STAR);
2022 cpudata->cstar = rdmsr(MSR_CSTAR);
2023 cpudata->sfmask = rdmsr(MSR_SFMASK);
2024
2025 vmx_vmcs_leave(vcpu);
2026 }
2027
2028 static int
2029 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2030 {
2031 struct vmx_cpudata *cpudata;
2032 int error;
2033
2034 /* Allocate the VMX cpudata. */
2035 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
2036 roundup(sizeof(*cpudata), PAGE_SIZE), 0,
2037 UVM_KMF_WIRED|UVM_KMF_ZERO);
2038 vcpu->cpudata = cpudata;
2039
2040 /* VMCS */
2041 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
2042 VMCS_NPAGES);
2043 if (error)
2044 goto error;
2045
2046 /* MSR Bitmap */
2047 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
2048 MSRBM_NPAGES);
2049 if (error)
2050 goto error;
2051
2052 /* Guest MSR List */
2053 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
2054 if (error)
2055 goto error;
2056
2057 /* Init the VCPU info. */
2058 vmx_vcpu_init(mach, vcpu);
2059
2060 return 0;
2061
2062 error:
2063 if (cpudata->vmcs_pa) {
2064 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
2065 VMCS_NPAGES);
2066 }
2067 if (cpudata->msrbm_pa) {
2068 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
2069 MSRBM_NPAGES);
2070 }
2071 if (cpudata->gmsr_pa) {
2072 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2073 }
2074
2075 /* cpudata came from uvm_km_alloc, not kmem_alloc; free it accordingly. */
uvm_km_free(kernel_map, (vaddr_t)cpudata,
    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2076 return error;
2077 }
2078
2079 static void
2080 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2081 {
2082 struct vmx_cpudata *cpudata = vcpu->cpudata;
2083
2084 vmx_vmcs_enter(vcpu);
2085 vmx_asid_free(vcpu);
2086 vmx_vmcs_leave(vcpu);
2087
2088 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
2089 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
2090 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
2091 uvm_km_free(kernel_map, (vaddr_t)cpudata,
2092 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
2093 }
2094
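/*
 * Layout of the 32-bit access-rights word of the VMCS guest segment
 * fields. VMX_SEG_ATTRIB_UNUSABLE has no equivalent in the nvmm segment
 * state, so it is synthesized from the P bit in both directions below.
 */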
2095 #define VMX_SEG_ATTRIB_TYPE __BITS(4,0)
2096 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2097 #define VMX_SEG_ATTRIB_P __BIT(7)
2098 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2099 #define VMX_SEG_ATTRIB_LONG __BIT(13)
2100 #define VMX_SEG_ATTRIB_DEF32 __BIT(14)
2101 #define VMX_SEG_ATTRIB_GRAN __BIT(15)
2102 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2103
2104 static void
2105 vmx_vcpu_setstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2106 {
2107 uint64_t attrib;
2108
2109 attrib =
2110 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2111 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2112 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2113 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2114 __SHIFTIN(segs[idx].attrib.lng, VMX_SEG_ATTRIB_LONG) |
2115 __SHIFTIN(segs[idx].attrib.def32, VMX_SEG_ATTRIB_DEF32) |
2116 __SHIFTIN(segs[idx].attrib.gran, VMX_SEG_ATTRIB_GRAN) |
2117 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2118
2119 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2120 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2121 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2122 }
2123 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2124 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2125 }
2126
2127 static void
2128 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2129 {
2130 uint64_t attrib = 0;
2131
2132 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2133 vmx_vmread(vmx_guest_segs[idx].selector, &segs[idx].selector);
2134 vmx_vmread(vmx_guest_segs[idx].attrib, &attrib);
2135 }
2136 vmx_vmread(vmx_guest_segs[idx].limit, &segs[idx].limit);
2137 vmx_vmread(vmx_guest_segs[idx].base, &segs[idx].base);
2138
2139 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2140 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2141 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2142 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2143 segs[idx].attrib.lng = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_LONG);
2144 segs[idx].attrib.def32 = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF32);
2145 segs[idx].attrib.gran = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_GRAN);
2146 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2147 segs[idx].attrib.p = 0;
2148 }
2149 }
2150
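/*
 * Return true if installing the given state changes the paging
 * configuration of the guest: the paging bits of CR0 and CR4, CR3, or
 * the paging-related bits of EFER. In that case the VPID-tagged TLB
 * entries are stale, and must be flushed before the next VMENTRY.
 */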
2151 static inline bool
2152 vmx_state_tlb_flush(struct nvmm_x64_state *state, uint64_t flags)
2153 {
2154 uint64_t cr0, cr3, cr4, efer;
2155
2156 if (flags & NVMM_X64_STATE_CRS) {
2157 vmx_vmread(VMCS_GUEST_CR0, &cr0);
2158 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2159 return true;
2160 }
2161 vmx_vmread(VMCS_GUEST_CR3, &cr3);
2162 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2163 return true;
2164 }
2165 vmx_vmread(VMCS_GUEST_CR4, &cr4);
2166 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2167 return true;
2168 }
2169 }
2170
2171 if (flags & NVMM_X64_STATE_MSRS) {
2172 vmx_vmread(VMCS_GUEST_IA32_EFER, &efer);
2173 if ((efer ^
2174 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2175 return true;
2176 }
2177 }
2178
2179 return false;
2180 }
2181
2182 static void
2183 vmx_vcpu_setstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
2184 {
2185 struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
2186 struct vmx_cpudata *cpudata = vcpu->cpudata;
2187 struct fxsave *fpustate;
2188 uint64_t ctls1, intstate;
2189
2190 vmx_vmcs_enter(vcpu);
2191
2192 if (vmx_state_tlb_flush(state, flags)) {
2193 cpudata->tlb_want_flush = true;
2194 }
2195
2196 if (flags & NVMM_X64_STATE_SEGS) {
2197 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2198 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2199 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2200 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2201 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2202 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2203 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2204 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2205 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2206 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2207 }
2208
2209 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2210 if (flags & NVMM_X64_STATE_GPRS) {
2211 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2212
2213 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2214 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2215 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2216 }
2217
2218 if (flags & NVMM_X64_STATE_CRS) {
2219 /* These bits are mandatory. */
2220 state->crs[NVMM_X64_CR_CR4] |= CR4_VMXE;
2221 state->crs[NVMM_X64_CR_CR0] |= CR0_NE;
2222
2223 vmx_vmwrite(VMCS_GUEST_CR0, state->crs[NVMM_X64_CR_CR0]);
2224 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2225 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); /* XXX PDPTE? */
2226 vmx_vmwrite(VMCS_GUEST_CR4, state->crs[NVMM_X64_CR_CR4]);
2227 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2228
2229 if (vmx_xcr0_mask != 0) {
2230 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2231 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2232 cpudata->gxcr0 &= vmx_xcr0_mask;
2233 cpudata->gxcr0 |= XCR0_X87;
2234 }
2235 }
2236
2237 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2238 if (flags & NVMM_X64_STATE_DRS) {
2239 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2240
2241 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2242 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2243 }
2244
2245 if (flags & NVMM_X64_STATE_MSRS) {
2246 cpudata->gmsr[VMX_MSRLIST_STAR].val =
2247 state->msrs[NVMM_X64_MSR_STAR];
2248 cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
2249 state->msrs[NVMM_X64_MSR_LSTAR];
2250 cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
2251 state->msrs[NVMM_X64_MSR_CSTAR];
2252 cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
2253 state->msrs[NVMM_X64_MSR_SFMASK];
2254 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2255 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2256
2257 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2258 state->msrs[NVMM_X64_MSR_EFER]);
2259 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2260 state->msrs[NVMM_X64_MSR_PAT]);
2261 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2262 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2263 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2264 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2265 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2266 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2267
2268 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2269 vmx_vmread(VMCS_ENTRY_CTLS, &ctls1);
2270 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2271 ctls1 |= ENTRY_CTLS_LONG_MODE;
2272 } else {
2273 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2274 }
2275 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2276 }
2277
2278 if (flags & NVMM_X64_STATE_MISC) {
2279 /* XXX CPL? not sure */
2280
2281 vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
2282 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2283 if (state->misc[NVMM_X64_MISC_INT_SHADOW]) {
2284 intstate |= INT_STATE_MOVSS;
2285 }
2286 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2287
2288 if (state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT]) {
2289 vmx_event_waitexit_enable(vcpu, false);
2290 } else {
2291 vmx_event_waitexit_disable(vcpu, false);
2292 }
2293
2294 if (state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT]) {
2295 vmx_event_waitexit_enable(vcpu, true);
2296 } else {
2297 vmx_event_waitexit_disable(vcpu, true);
2298 }
2299 }
2300
2301 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2302 if (flags & NVMM_X64_STATE_FPU) {
2303 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
2304 sizeof(state->fpu));
2305
2306 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
2307 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2308 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2309
2310 if (vmx_xcr0_mask != 0) {
2311 /* Reset XSTATE_BV, to force a reload. */
2312 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
2313 }
2314 }
2315
2316 vmx_vmcs_leave(vcpu);
2317 }
2318
2319 static void
2320 vmx_vcpu_getstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
2321 {
2322 struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
2323 struct vmx_cpudata *cpudata = vcpu->cpudata;
2324 uint64_t intstate;
2325
2326 vmx_vmcs_enter(vcpu);
2327
2328 if (flags & NVMM_X64_STATE_SEGS) {
2329 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2330 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2331 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2332 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2333 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2334 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2335 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2336 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2337 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2338 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2339 }
2340
2341 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2342 if (flags & NVMM_X64_STATE_GPRS) {
2343 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2344
2345 vmx_vmread(VMCS_GUEST_RIP, &state->gprs[NVMM_X64_GPR_RIP]);
2346 vmx_vmread(VMCS_GUEST_RSP, &state->gprs[NVMM_X64_GPR_RSP]);
2347 vmx_vmread(VMCS_GUEST_RFLAGS, &state->gprs[NVMM_X64_GPR_RFLAGS]);
2348 }
2349
2350 if (flags & NVMM_X64_STATE_CRS) {
2351 vmx_vmread(VMCS_GUEST_CR0, &state->crs[NVMM_X64_CR_CR0]);
2352 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2353 vmx_vmread(VMCS_GUEST_CR3, &state->crs[NVMM_X64_CR_CR3]);
2354 vmx_vmread(VMCS_GUEST_CR4, &state->crs[NVMM_X64_CR_CR4]);
2355 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2356 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2357
2358 /* Hide VMXE. */
2359 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2360 }
2361
2362 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2363 if (flags & NVMM_X64_STATE_DRS) {
2364 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2365
2366 vmx_vmread(VMCS_GUEST_DR7, &state->drs[NVMM_X64_DR_DR7]);
2367 }
2368
2369 if (flags & NVMM_X64_STATE_MSRS) {
2370 state->msrs[NVMM_X64_MSR_STAR] =
2371 cpudata->gmsr[VMX_MSRLIST_STAR].val;
2372 state->msrs[NVMM_X64_MSR_LSTAR] =
2373 cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
2374 state->msrs[NVMM_X64_MSR_CSTAR] =
2375 cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
2376 state->msrs[NVMM_X64_MSR_SFMASK] =
2377 cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
2378 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2379 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2380
2381 vmx_vmread(VMCS_GUEST_IA32_EFER,
2382 &state->msrs[NVMM_X64_MSR_EFER]);
2383 vmx_vmread(VMCS_GUEST_IA32_PAT,
2384 &state->msrs[NVMM_X64_MSR_PAT]);
2385 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS,
2386 &state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2387 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP,
2388 &state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2389 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP,
2390 &state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2391 }
2392
2393 if (flags & NVMM_X64_STATE_MISC) {
2394 /* XXX CPL? not sure */
2395
2396 vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
2397 state->misc[NVMM_X64_MISC_INT_SHADOW] =
2398 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2399
2400 state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT] =
2401 cpudata->int_window_exit;
2402 state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT] =
2403 cpudata->nmi_window_exit;
2404 }
2405
2406 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
2407 if (flags & NVMM_X64_STATE_FPU) {
2408 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
2409 sizeof(state->fpu));
2410 }
2411
2412 vmx_vmcs_leave(vcpu);
2413 }
2414
2415 /* -------------------------------------------------------------------------- */
2416
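/*
 * pm_tlb_flush hook, invoked by the pmap when the EPT tables of a
 * machine have been modified. Mark every CPU as wanting an EPT flush,
 * and ensure that no VCPU keeps running on stale entries.
 */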
2417 static void
2418 vmx_tlb_flush(struct pmap *pm)
2419 {
2420 struct nvmm_machine *mach = pm->pm_data;
2421 struct vmx_machdata *machdata = mach->machdata;
2422 struct nvmm_cpu *vcpu;
2423 int error;
2424 size_t i;
2425
2426 kcpuset_atomicly_merge(machdata->ept_want_flush, kcpuset_running);
2427
2428 /*
2429 * Not as dumb as it seems: we want to make sure that, when we leave
2430 * this function, each VCPU has been halted at some point, and has
2431 * possibly been resumed with the updated kcpuset.
2432 */
2433 for (i = 0; i < NVMM_MAX_VCPUS; i++) {
2434 error = nvmm_vcpu_get(mach, i, &vcpu);
2435 if (error)
2436 continue;
2437 nvmm_vcpu_put(vcpu);
2438 }
2439 }
2440
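/*
 * Machine-wide initialization: switch the pmap of the guest VM to the
 * EPT format, register the TLB flush hook, and request an initial EPT
 * flush on every CPU.
 */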
2441 static void
2442 vmx_machine_create(struct nvmm_machine *mach)
2443 {
2444 struct pmap *pmap = mach->vm->vm_map.pmap;
2445 struct vmx_machdata *machdata;
2446
2447 /* Convert to EPT. */
2448 pmap_ept_transform(pmap);
2449
2450 /* Fill in pmap info. */
2451 pmap->pm_data = (void *)mach;
2452 pmap->pm_tlb_flush = vmx_tlb_flush;
2453
2454 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
2455 kcpuset_create(&machdata->ept_want_flush, true);
2456 mach->machdata = machdata;
2457
2458 /* Start with an EPT flush everywhere. */
2459 kcpuset_copy(machdata->ept_want_flush, kcpuset_running);
2460 }
2461
2462 static void
2463 vmx_machine_destroy(struct nvmm_machine *mach)
2464 {
2465 struct vmx_machdata *machdata = mach->machdata;
2466
2467 kcpuset_destroy(machdata->ept_want_flush);
2468 kmem_free(machdata, sizeof(struct vmx_machdata));
2469 }
2470
2471 static int
2472 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
2473 {
2474 struct nvmm_x86_conf_cpuid *cpuid = data;
2475 struct vmx_machdata *machdata = (struct vmx_machdata *)mach->machdata;
2476 size_t i;
2477
2478 if (__predict_false(op != NVMM_X86_CONF_CPUID)) {
2479 return EINVAL;
2480 }
2481
2482 if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
2483 (cpuid->set.ebx & cpuid->del.ebx) ||
2484 (cpuid->set.ecx & cpuid->del.ecx) ||
2485 (cpuid->set.edx & cpuid->del.edx))) {
2486 return EINVAL;
2487 }
2488
2489 /* If the leaf is already registered, replace the entry. */
2490 for (i = 0; i < VMX_NCPUIDS; i++) {
2491 if (!machdata->cpuidpresent[i]) {
2492 continue;
2493 }
2494 if (machdata->cpuid[i].leaf == cpuid->leaf) {
2495 memcpy(&machdata->cpuid[i], cpuid,
2496 sizeof(struct nvmm_x86_conf_cpuid));
2497 return 0;
2498 }
2499 }
2500
2501 /* Not registered yet, insert it in a free slot. */
2502 for (i = 0; i < VMX_NCPUIDS; i++) {
2503 if (!machdata->cpuidpresent[i]) {
2504 machdata->cpuidpresent[i] = true;
2505 memcpy(&machdata->cpuid[i], cpuid,
2506 sizeof(struct nvmm_x86_conf_cpuid));
2507 return 0;
2508 }
2509 }
2510
2511 return ENOBUFS;
2512 }
2513
2514 /* -------------------------------------------------------------------------- */
2515
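/*
 * Compute the value of a VMCS control field. In the capability MSRs,
 * bit N of the low word is the "allowed-0" setting (if set, the control
 * must be one), and bit 32+N is the "allowed-1" setting (if clear, the
 * control must be zero). When IA32_VMX_BASIC_TRUE_CTLS is set, the
 * TRUE_* MSRs give the real constraints, and the plain MSRs give the
 * defaults. Fail if a bit we require (set_one/set_zero) cannot be
 * honored.
 */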
2516 static int
2517 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
2518 uint64_t set_one, uint64_t set_zero, uint64_t *res)
2519 {
2520 uint64_t basic, val, true_val;
2521 bool one_allowed, zero_allowed, has_true;
2522 size_t i;
2523
2524 basic = rdmsr(MSR_IA32_VMX_BASIC);
2525 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
2526
2527 val = rdmsr(msr_ctls);
2528 if (has_true) {
2529 true_val = rdmsr(msr_true_ctls);
2530 } else {
2531 true_val = val;
2532 }
2533
2534 #define ONE_ALLOWED(msrval, bitoff) \
2535 ((msrval & __BIT(32 + bitoff)) != 0)
2536 #define ZERO_ALLOWED(msrval, bitoff) \
2537 ((msrval & __BIT(bitoff)) == 0)
2538
2539 for (i = 0; i < 32; i++) {
2540 one_allowed = ONE_ALLOWED(true_val, i);
2541 zero_allowed = ZERO_ALLOWED(true_val, i);
2542
2543 if (zero_allowed && !one_allowed) {
2544 if (set_one & __BIT(i))
2545 return -1;
2546 *res &= ~__BIT(i);
2547 } else if (one_allowed && !zero_allowed) {
2548 if (set_zero & __BIT(i))
2549 return -1;
2550 *res |= __BIT(i);
2551 } else {
2552 if (set_zero & __BIT(i)) {
2553 *res &= ~__BIT(i);
2554 } else if (set_one & __BIT(i)) {
2555 *res |= __BIT(i);
2556 } else if (!has_true) {
2557 *res &= ~__BIT(i);
2558 } else if (ZERO_ALLOWED(val, i)) {
2559 *res &= ~__BIT(i);
2560 } else if (ONE_ALLOWED(val, i)) {
2561 *res |= __BIT(i);
2562 } else {
2563 return -1;
2564 }
2565 }
2566 }
2567
2568 return 0;
2569 }
2570
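/*
 * Detect whether the CPU provides everything we rely on: VMX enabled and
 * locked in IA32_FEATURE_CONTROL, write-back VMCS memory, 4-level EPT
 * with INVEPT/INVVPID and A/D bits, CR0/CR4 values compatible with the
 * fixed bits, and control fields that accept our one/zero requirements.
 */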
2571 static bool
2572 vmx_ident(void)
2573 {
2574 uint64_t msr;
2575 int ret;
2576
2577 if (!(cpu_feature[1] & CPUID2_VMX)) {
2578 return false;
2579 }
2580
2581 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
2582 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
2583 return false;
2584 }
2585
2586 msr = rdmsr(MSR_IA32_VMX_BASIC);
2587 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
2588 return false;
2589 }
2590 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
2591 return false;
2592 }
2593
2594 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
2595 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
2596 return false;
2597 }
2598 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
2599 return false;
2600 }
2601 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
2602 return false;
2603 }
2604 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) == 0) {
2605 return false;
2606 }
2607 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
2608 return false;
2609 }
2610
2611 /* PG and PE are reported fixed even with Unrestricted Guests; ignore them. */
2612 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
2613 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
2614 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
2615 if (ret == -1) {
2616 return false;
2617 }
2618
2619 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
2620 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
2621 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
2622 if (ret == -1) {
2623 return false;
2624 }
2625
2626 /* Init the CTLSs right now, and check for errors. */
2627 ret = vmx_init_ctls(
2628 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2629 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
2630 &vmx_pinbased_ctls);
2631 if (ret == -1) {
2632 return false;
2633 }
2634 ret = vmx_init_ctls(
2635 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2636 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
2637 &vmx_procbased_ctls);
2638 if (ret == -1) {
2639 return false;
2640 }
2641 ret = vmx_init_ctls(
2642 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2, /* no TRUE variant */
2643 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
2644 &vmx_procbased_ctls2);
2645 if (ret == -1) {
2646 return false;
2647 }
2648 ret = vmx_init_ctls(
2649 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2650 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
2651 &vmx_entry_ctls);
2652 if (ret == -1) {
2653 return false;
2654 }
2655 ret = vmx_init_ctls(
2656 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
2657 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
2658 &vmx_exit_ctls);
2659 if (ret == -1) {
2660 return false;
2661 }
2662
2663 return true;
2664 }
2665
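/*
 * Cross-call handler, executed on each CPU: set or clear CR4_VMXE, and
 * execute VMXON with the per-CPU VMXON region (or VMXOFF beforehand,
 * when disabling).
 */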
2666 static void
2667 vmx_change_cpu(void *arg1, void *arg2)
2668 {
2669 struct cpu_info *ci = curcpu();
2670 bool enable = (bool)arg1;
2671 uint64_t cr4;
2672
2673 if (!enable) {
2674 vmx_vmxoff();
2675 }
2676
2677 cr4 = rcr4();
2678 if (enable) {
2679 cr4 |= CR4_VMXE;
2680 } else {
2681 cr4 &= ~CR4_VMXE;
2682 }
2683 lcr4(cr4);
2684
2685 if (enable) {
2686 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
2687 }
2688 }
2689
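/*
 * L1TF mitigation. If the CPU reports SKIP_L1DFL_VMENTRY in
 * IA32_ARCH_CAPABILITIES, it is not vulnerable and nothing is needed.
 * Otherwise, if IA32_FLUSH_CMD is available, grow the VMENTRY autoload
 * list to cover the L1D_FLUSH entry initialized in vmx_vcpu_init, so
 * that the L1D cache gets flushed on each VMENTRY.
 */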
2690 static void
2691 vmx_init_l1tf(void)
2692 {
2693 u_int descs[4];
2694 uint64_t msr;
2695
2696 if (cpuid_level < 7) {
2697 return;
2698 }
2699
2700 x86_cpuid(7, descs);
2701
2702 if (descs[3] & CPUID_SEF_ARCH_CAP) {
2703 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
2704 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
2705 /* No mitigation needed. */
2706 return;
2707 }
2708 }
2709
2710 if (descs[3] & CPUID_SEF_L1D_FLUSH) {
2711 /* Enable hardware mitigation. */
2712 vmx_msrlist_entry_nmsr += 1;
2713 }
2714 }
2715
2716 static void
2717 vmx_init(void)
2718 {
2719 CPU_INFO_ITERATOR cii;
2720 struct cpu_info *ci;
2721 uint64_t xc, msr;
2722 struct vmxon *vmxon;
2723 uint32_t revision;
2724 paddr_t pa;
2725 vaddr_t va;
2726 int error;
2727
2728 /* Init the ASID bitmap (VPID). */
2729 vmx_init_asid(VPID_MAX);
2730
2731 /* Init the XCR0 mask. */
2732 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
2733
2734 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
2735 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
2736 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
2737 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
2738 } else {
2739 vmx_tlb_flush_op = VMX_INVVPID_ALL;
2740 }
2741 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
2742 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
2743 } else {
2744 vmx_ept_flush_op = VMX_INVEPT_ALL;
2745 }
2746 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
2747 vmx_eptp_type = EPTP_TYPE_WB;
2748 } else {
2749 vmx_eptp_type = EPTP_TYPE_UC;
2750 }
2751
2752 /* Init the L1TF mitigation. */
2753 vmx_init_l1tf();
2754
2755 memset(vmxoncpu, 0, sizeof(vmxoncpu));
2756 revision = vmx_get_revision();
2757
2758 for (CPU_INFO_FOREACH(cii, ci)) {
2759 error = vmx_memalloc(&pa, &va, 1);
2760 if (error) {
2761 panic("%s: out of memory", __func__);
2762 }
2763 vmxoncpu[cpu_index(ci)].pa = pa;
2764 vmxoncpu[cpu_index(ci)].va = va;
2765
2766 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
2767 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
2768 }
2769
2770 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
2771 xc_wait(xc);
2772 }
2773
2774 static void
2775 vmx_fini_asid(void)
2776 {
2777 size_t allocsz;
2778
2779 allocsz = roundup(vmx_maxasid, 8) / 8;
2780 kmem_free(vmx_asidmap, allocsz);
2781
2782 mutex_destroy(&vmx_asidlock);
2783 }
2784
2785 static void
2786 vmx_fini(void)
2787 {
2788 uint64_t xc;
2789 size_t i;
2790
2791 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
2792 xc_wait(xc);
2793
2794 for (i = 0; i < MAXCPUS; i++) {
2795 if (vmxoncpu[i].pa != 0)
2796 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
2797 }
2798
2799 vmx_fini_asid();
2800 }
2801
2802 static void
2803 vmx_capability(struct nvmm_capability *cap)
2804 {
2805 cap->u.x86.xcr0_mask = vmx_xcr0_mask;
2806 cap->u.x86.mxcsr_mask = x86_fpu_mxcsr_mask;
2807 cap->u.x86.conf_cpuid_maxops = VMX_NCPUIDS;
2808 }
2809
2810 const struct nvmm_impl nvmm_x86_vmx = {
2811 .ident = vmx_ident,
2812 .init = vmx_init,
2813 .fini = vmx_fini,
2814 .capability = vmx_capability,
2815 .conf_max = NVMM_X86_NCONF,
2816 .conf_sizes = vmx_conf_sizes,
2817 .state_size = sizeof(struct nvmm_x64_state),
2818 .machine_create = vmx_machine_create,
2819 .machine_destroy = vmx_machine_destroy,
2820 .machine_configure = vmx_machine_configure,
2821 .vcpu_create = vmx_vcpu_create,
2822 .vcpu_destroy = vmx_vcpu_destroy,
2823 .vcpu_setstate = vmx_vcpu_setstate,
2824 .vcpu_getstate = vmx_vcpu_getstate,
2825 .vcpu_inject = vmx_vcpu_inject,
2826 .vcpu_run = vmx_vcpu_run
2827 };
2828