/*	$NetBSD: nvmm_x86_vmx.c,v 1.91 2025/08/15 11:36:44 skrll Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.91 2025/08/15 11:36:44 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <sys/bitops.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <x86/apicvar.h>
#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>

#include <machine/cpuvar.h>
#include <machine/pmap_private.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	asm volatile (
		"invept		%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	asm volatile (
		"invvpid	%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	asm volatile (
		"vmread		%[field],%[value];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}
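/*
 * VMX instructions report failure through RFLAGS: CF=1 means VMfailInvalid
 * (no current VMCS), ZF=1 means VMfailValid (an error code is then available
 * in the VM-instruction error field). The jz/jc pairs in the wrappers above
 * and below branch to the vmx_insn_failvalid/vmx_insn_failinvalid panic
 * stubs, defined outside this excerpt, so each wrapper either succeeds or
 * panics.
 */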
static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	asm volatile (
		"vmwrite	%[value],%[field];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

static inline paddr_t __diagused
vmx_vmptrst(void)
{
	paddr_t pa;

	asm volatile (
		"vmptrst	%[pa];"
		:
		: [pa] "m" (*(paddr_t *)&pa)
		: "memory"
	);

	return pa;
}

static inline void
vmx_vmptrld(paddr_t *pa)
{
	asm volatile (
		"vmptrld	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	asm volatile (
		"vmclear	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_cli(void)
{
	asm volatile ("cli" ::: "memory");
}

static inline void
vmx_sti(void)
{
	asm volatile ("sti" ::: "memory");
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC		0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS	0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS	0x0482
#define MSR_IA32_VMX_EXIT_CTLS		0x0483
#define MSR_IA32_VMX_ENTRY_CTLS		0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2	0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0		0x0486
#define MSR_IA32_VMX_CR0_FIXED1		0x0487
#define MSR_IA32_VMX_CR4_FIXED0		0x0488
#define MSR_IA32_VMX_CR4_FIXED1		0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_XO			__BIT(0)
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_2MB			__BIT(16)
#define		IA32_VMX_EPT_VPID_1GB			__BIT(17)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO	__BIT(22)
#define		IA32_VMX_EPT_VPID_SHSTK			__BIT(23)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */
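/*
 * The VMCS field encodings below follow the layout fixed by the Intel SDM
 * (Vol. 3, Appendix B): bits 14:13 give the field width (0=16-bit, 1=64-bit,
 * 2=32-bit, 3=natural), bits 11:10 the type (0=control, 1=read-only data,
 * 2=guest state, 3=host state). For example, VMCS_GUEST_ES_SELECTOR (0x0800)
 * decodes to a 16-bit guest-state field, and VMCS_HOST_RSP (0x6C14) to a
 * natural-width host-state field.
 */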
/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_SSS			__BIT(7)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
#define VMCS_TSC_MULTIPLIER			0x00002032
#define VMCS_ENCLV_EXIT_BITMAP			0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
#define VMCS_GUEST_RTIT_CTL			0x00002814
#define VMCS_GUEST_PKRS				0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
#define VMCS_HOST_IA32_PKRS			0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define		EXIT_CTLS_CLEAR_RTIT_CTL	__BIT(25)
#define		EXIT_CTLS_LOAD_CET		__BIT(28)
#define		EXIT_CTLS_LOAD_PKRS		__BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define		ENTRY_CTLS_LOAD_RTIT_CTL	__BIT(18)
#define		ENTRY_CTLS_LOAD_CET		__BIT(20)
#define		ENTRY_CTLS_LOAD_PKRS		__BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH		0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define		PROC_CTLS2_PT_USES_GPA		__BIT(24)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define		PROC_CTLS2_WAIT_PAUSE_ENABLE	__BIT(26)
#define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host-state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
#define VMCS_GUEST_IA32_S_CET			0x00006828
#define VMCS_GUEST_SSP				0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE		0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16
#define VMCS_HOST_IA32_S_CET			0x00006C18
#define VMCS_HOST_SSP				0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE		0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
#define VMCS_EXITCODE_SPP			66
#define VMCS_EXITCODE_UMWAIT			67
#define VMCS_EXITCODE_TPAUSE			68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;
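/*
 * The *_ONE/*_ZERO pairs below list the control bits this driver requires to
 * be 1, respectively 0. Presumably these are validated at attach time
 * against the MSR_IA32_VMX_TRUE_*_CTLS capability MSRs declared earlier (the
 * validation code is outside this excerpt); hardware that cannot honor a
 * required setting cannot run NVMM guests.
 */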
#define VMX_PINBASED_CTLS_ONE \
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE \
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO \
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE \
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE \
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO \
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE \
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define CR0_STATIC_MASK \
	(CR0_ET | CR0_NW | CR0_CD)

#define CR4_VALID \
	(CR4_VME | \
	 CR4_PVI | \
	 CR4_TSD | \
	 CR4_DE | \
	 CR4_PSE | \
	 CR4_PAE | \
	 CR4_MCE | \
	 CR4_PGE | \
	 CR4_PCE | \
	 CR4_OSFXSR | \
	 CR4_OSXMMEXCPT | \
	 CR4_UMIP | \
	 /* CR4_LA57 excluded */ \
	 /* CR4_VMXE excluded */ \
	 /* CR4_SMXE excluded */ \
	 CR4_FSGSBASE | \
	 CR4_PCIDE | \
	 CR4_OSXSAVE | \
	 CR4_SMEP | \
	 CR4_SMAP \
	 /* CR4_PKE excluded */ \
	 /* CR4_CET excluded */ \
	 /* CR4_PKS excluded */)
#define CR4_INVALID \
	(0xFFFFFFFFFFFFFFFFULL & ~CR4_VALID)
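/*
 * Bits in EFER/%cr0/%cr4 whose modification changes the paging mode or
 * translation behavior. A guest write that toggles any of these bits
 * invalidates cached translations, which is why the CR handlers and the
 * state-setting paths use these masks to decide when to request a guest TLB
 * flush (gtlb_want_flush).
 */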
#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	struct cpu_info *vmcs_ci;
	bool vmcs_launched;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
	paddr_t vmcs_pa = (paddr_t)arg1;

	vmx_vmclear(&vmcs_pa);
}

static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
	uint64_t xc;
	int bound;

	KASSERT(kpreempt_disabled());

	bound = curlwp_bind();
	kpreempt_enable();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
	xc_wait(xc);

	kpreempt_disable();
	curlwp_bindx(bound);
}

static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct cpu_info *vmcs_ci;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		KASSERT(kpreempt_disabled());
		KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
		return;
	}

	vmcs_ci = cpudata->vmcs_ci;
	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	kpreempt_disable();

	if (vmcs_ci == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_ci != curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}
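/*
 * VMCS lifecycle refresher: a VMCS becomes "current" via VMPTRLD, and must
 * be VMCLEARed before its first load as well as whenever it last ran on
 * another CPU, since part of its working state may still live in that CPU.
 * After a VMCLEAR, the next VM-entry must use VMLAUNCH rather than VMRESUME,
 * which is why vmx_vmcs_enter() above clears vmcs_launched in both of these
 * cases.
 */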
static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_ci = curcpu();
	kpreempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}
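/*
 * Window exiting, in short: when an event cannot be delivered to the guest
 * right away (interrupts disabled, or an NMI already being handled), the
 * functions above set PROC_CTLS_{INT,NMI}_WINDOW_EXITING so that the CPU
 * triggers a VM-exit as soon as the guest becomes able to receive the event,
 * at which point the pending event can be injected.
 */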
static inline bool
vmx_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

static inline int
vmx_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	uint64_t rflags, info, error;
	u_int evtype;
	uint8_t vector;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		if (vmx_excp_has_rf(vector)) {
			rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
			vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags | PSL_RF);
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN(vector, INTR_INFO_VECTOR) |
	    __SHIFTIN(type, INTR_INFO_TYPE) |
	    __SHIFTIN(err, INTR_INFO_ERROR) |
	    __SHIFTIN(1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}
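/*
 * Worked example of the encoding above: injecting #PF (vector 14, which
 * carries an error code) assembles info = 0x0E | (INTR_TYPE_HW_EXC << 8) |
 * INTR_INFO_ERROR | INTR_INFO_VALID = 0x80000B0E, with the page-fault error
 * code written separately into VMCS_ENTRY_EXCEPTION_ERROR.
 */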
static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate, rflags;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);

	rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
	vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags & ~PSL_RF);

	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_VCPU_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}
#define VMX_CPUID_MAX_BASIC		0x16
#define VMX_CPUID_MAX_HYPERVISOR	0x40000010
#define VMX_CPUID_MAX_EXTENDED		0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}
static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	unsigned int ncpus;
	uint64_t cr4;

	/*
	 * `If a value entered for CPUID.EAX is higher than the maximum
	 * input value for basic or extended function for that
	 * processor then the data for the highest basic information
	 * leaf is returned.'
	 *
	 * --Intel 64 and IA-32 Architectures Software Developer's
	 * Manual, Vol. 2A, Order Number: 325383-077US, April 2022,
	 * Sec. 3.2 `Instructions (A-L)', CPUID--CPU Identification,
	 * p. 3-214.
	 *
	 * We take the same to hold for the hypervisor range,
	 * 0x40000000-0x4fffffff.
	 *
	 * (Sync with nvmm_x86_svm.c.)
	 */
	if (eax < 0x40000000) {	/* basic CPUID range */
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {	/* hypervisor CPUID range */
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {	/* extended CPUID range */
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {

	/*
	 * basic CPUID range
	 */
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002:
		break;
	case 0x00000003:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		break; /* TODO? */
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = atomic_load_relaxed(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
			    CPUID_PES1_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	/*
	 * hypervisor CPUID range
	 */
	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x40000010: /* VMware-style TSC and LAPIC freq */
		cpudata->gprs[NVMM_X64_GPR_RAX] = curcpu()->ci_data.cpu_cc_freq / 1000;
		if (has_lapic())
			cpudata->gprs[NVMM_X64_GPR_RBX] = lapic_per_second / 1000;
		else
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;

	/*
	 * extended CPUID range
	 */
	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;

	default:
		break;
	}
}
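/*
 * Illustrative only (userland side, not part of this file): a client can
 * shape guest CPUID through the NVMM_VCPU_CONF_CPUID knob, e.g. to hide the
 * MONITOR/MWAIT feature bit from leaf 1:
 *
 *	struct nvmm_vcpu_conf_cpuid conf = {
 *		.mask = 1,
 *		.leaf = 0x00000001,
 *		.u.mask.del.ecx = CPUID2_MONITOR,
 *	};
 *
 * vmx_exit_cpuid() below applies such del/set masks on top of the in-kernel
 * defaults computed above.
 */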
static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}
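/*
 * Illustrative note: on typical hardware IA32_VMX_CR0_FIXED0 has PE, NE and
 * PG set, meaning those %cr0 bits must normally be 1 while in VMX operation
 * (unrestricted guests relax PE/PG). This is one reason the CR0 handler
 * below forces CR0_NE into realcr0.
 */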
static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr0, realcr0, fakecr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		fakecr0 = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		fakecr0 = cpudata->gprs[gpr];
	}

	/*
	 * fakecr0 is the value the guest believes is in %cr0. realcr0 is the
	 * actual value in %cr0.
	 *
	 * In fakecr0 we must force CR0_ET to 1.
	 *
	 * In realcr0 we must force CR0_NW and CR0_CD to 0, and CR0_ET and
	 * CR0_NE to 1.
	 */
	fakecr0 |= CR0_ET;
	realcr0 = (fakecr0 & ~CR0_STATIC_MASK) | CR0_ET | CR0_NE;

	if (vmx_check_cr(realcr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (realcr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	oldcr0 = (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
	    (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
	if ((oldcr0 ^ fakecr0) & CR0_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_CR0_SHADOW, fakecr0);
	vmx_vmwrite(VMCS_GUEST_CR0, realcr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, oldcr4, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	if (gpr & CR4_INVALID) {
		return -1;
	}
	cr4 = gpr | CR4_VMXE;
	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	oldcr4 = vmx_vmread(VMCS_GUEST_CR4);
	if ((oldcr4 ^ gpr) & CR4_TLB_FLUSH) {
		cpudata->gtlb_want_flush = true;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual, struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
		if (cpudata->tpr.exit_changed) {
			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;
	int ret;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}
}
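/*
 * Note on the CR8 handler above: %cr8 is an architectural alias for the
 * upper four bits of the local APIC TPR. Since PROC_CTLS_{R,L}CR8_EXITING
 * are required-1 in this driver, every guest CR8 access traps here and is
 * virtualized in software through gcr8; NVMM_VCPU_EXIT_TPR_CHANGED lets the
 * client propagate the new priority to its virtual APIC when it requested
 * that via the TPR configuration.
 */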
#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

	exit->reason = NVMM_VCPU_EXIT_IO;

	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if (exit->u.io.in && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.io.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static const uint64_t msr_ignore_list[] = {
	MSR_BIOS_SIGN,
	MSR_IA32_PLATFORM_ID
};
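/*
 * With PROC_CTLS_USE_MSR_BITMAPS enabled, only MSRs whose bit is set in the
 * per-VCPU bitmap cause RDMSR/WRMSR exits; all others are accessed directly
 * by the guest. The handler below resolves in-kernel the few exiting MSRs it
 * knows how to emulate (PAT, MISC_ENABLE, ARCH_CAPABILITIES, TSC, and the
 * ignore list above), and defers everything else to userland.
 */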
(val & 0xFFFFFFFF); 1891 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32); 1892 goto handled; 1893 } 1894 for (i = 0; i < __arraycount(msr_ignore_list); i++) { 1895 if (msr_ignore_list[i] != exit->u.rdmsr.msr) 1896 continue; 1897 val = 0; 1898 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF); 1899 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32); 1900 goto handled; 1901 } 1902 } else { 1903 if (exit->u.wrmsr.msr == MSR_TSC) { 1904 cpudata->gtsc = exit->u.wrmsr.val; 1905 cpudata->gtsc_want_update = true; 1906 goto handled; 1907 } 1908 if (exit->u.wrmsr.msr == MSR_CR_PAT) { 1909 val = exit->u.wrmsr.val; 1910 if (__predict_false(!nvmm_x86_pat_validate(val))) { 1911 goto error; 1912 } 1913 vmx_vmwrite(VMCS_GUEST_IA32_PAT, val); 1914 goto handled; 1915 } 1916 if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) { 1917 /* Don't care. */ 1918 goto handled; 1919 } 1920 for (i = 0; i < __arraycount(msr_ignore_list); i++) { 1921 if (msr_ignore_list[i] != exit->u.wrmsr.msr) 1922 continue; 1923 goto handled; 1924 } 1925 } 1926 1927 return false; 1928 1929 handled: 1930 vmx_inkernel_advance(); 1931 return true; 1932 1933 error: 1934 vmx_inject_gp(vcpu); 1935 return true; 1936 } 1937 1938 static void 1939 vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1940 struct nvmm_vcpu_exit *exit) 1941 { 1942 struct vmx_cpudata *cpudata = vcpu->cpudata; 1943 uint64_t inslen, rip; 1944 1945 exit->reason = NVMM_VCPU_EXIT_RDMSR; 1946 exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF); 1947 1948 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) { 1949 exit->reason = NVMM_VCPU_EXIT_NONE; 1950 return; 1951 } 1952 1953 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH); 1954 rip = vmx_vmread(VMCS_GUEST_RIP); 1955 exit->u.rdmsr.npc = rip + inslen; 1956 1957 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); 1958 } 1959 1960 static void 1961 vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1962 struct nvmm_vcpu_exit *exit) 1963 { 1964 struct vmx_cpudata *cpudata = vcpu->cpudata; 1965 uint64_t rdx, rax, inslen, rip; 1966 1967 rdx = cpudata->gprs[NVMM_X64_GPR_RDX]; 1968 rax = cpudata->gprs[NVMM_X64_GPR_RAX]; 1969 1970 exit->reason = NVMM_VCPU_EXIT_WRMSR; 1971 exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF); 1972 exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF); 1973 1974 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) { 1975 exit->reason = NVMM_VCPU_EXIT_NONE; 1976 return; 1977 } 1978 1979 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH); 1980 rip = vmx_vmread(VMCS_GUEST_RIP); 1981 exit->u.wrmsr.npc = rip + inslen; 1982 1983 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); 1984 } 1985 1986 static void 1987 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1988 struct nvmm_vcpu_exit *exit) 1989 { 1990 struct vmx_cpudata *cpudata = vcpu->cpudata; 1991 uint64_t val; 1992 1993 exit->reason = NVMM_VCPU_EXIT_NONE; 1994 1995 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) | 1996 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF); 1997 1998 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) { 1999 goto error; 2000 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) { 2001 goto error; 2002 } else if (__predict_false((val & XCR0_X87) == 0)) { 2003 goto error; 2004 } 2005 2006 cpudata->gxcr0 = val; 2007 2008 vmx_inkernel_advance(); 2009 return; 2010 2011 error: 2012 vmx_inject_gp(vcpu); 2013 } 2014 2015 #define VMX_EPT_VIOLATION_READ __BIT(0) 2016 #define VMX_EPT_VIOLATION_WRITE __BIT(1) 2017 #define VMX_EPT_VIOLATION_EXECUTE __BIT(2) 2018 2019 static void 
2020 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 2021 struct nvmm_vcpu_exit *exit) 2022 { 2023 uint64_t perm; 2024 gpaddr_t gpa; 2025 2026 gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS); 2027 2028 exit->reason = NVMM_VCPU_EXIT_MEMORY; 2029 perm = vmx_vmread(VMCS_EXIT_QUALIFICATION); 2030 if (perm & VMX_EPT_VIOLATION_WRITE) 2031 exit->u.mem.prot = PROT_WRITE; 2032 else if (perm & VMX_EPT_VIOLATION_EXECUTE) 2033 exit->u.mem.prot = PROT_EXEC; 2034 else 2035 exit->u.mem.prot = PROT_READ; 2036 exit->u.mem.gpa = gpa; 2037 exit->u.mem.inst_len = 0; 2038 2039 vmx_vcpu_state_provide(vcpu, 2040 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 2041 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 2042 } 2043 2044 /* -------------------------------------------------------------------------- */ 2045 2046 static void 2047 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu) 2048 { 2049 struct vmx_cpudata *cpudata = vcpu->cpudata; 2050 2051 fpu_kern_enter(); 2052 /* TODO: should we use *XSAVE64 here? */ 2053 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask, false); 2054 2055 if (vmx_xcr0_mask != 0) { 2056 cpudata->hxcr0 = rdxcr(0); 2057 wrxcr(0, cpudata->gxcr0); 2058 } 2059 } 2060 2061 static void 2062 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu) 2063 { 2064 struct vmx_cpudata *cpudata = vcpu->cpudata; 2065 2066 if (vmx_xcr0_mask != 0) { 2067 cpudata->gxcr0 = rdxcr(0); 2068 wrxcr(0, cpudata->hxcr0); 2069 } 2070 2071 /* TODO: should we use *XSAVE64 here? */ 2072 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask, false); 2073 fpu_kern_leave(); 2074 } 2075 2076 static void 2077 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu) 2078 { 2079 struct vmx_cpudata *cpudata = vcpu->cpudata; 2080 2081 x86_dbregs_save(curlwp); 2082 2083 ldr7(0); 2084 2085 ldr0(cpudata->drs[NVMM_X64_DR_DR0]); 2086 ldr1(cpudata->drs[NVMM_X64_DR_DR1]); 2087 ldr2(cpudata->drs[NVMM_X64_DR_DR2]); 2088 ldr3(cpudata->drs[NVMM_X64_DR_DR3]); 2089 ldr6(cpudata->drs[NVMM_X64_DR_DR6]); 2090 } 2091 2092 static void 2093 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu) 2094 { 2095 struct vmx_cpudata *cpudata = vcpu->cpudata; 2096 2097 cpudata->drs[NVMM_X64_DR_DR0] = rdr0(); 2098 cpudata->drs[NVMM_X64_DR_DR1] = rdr1(); 2099 cpudata->drs[NVMM_X64_DR_DR2] = rdr2(); 2100 cpudata->drs[NVMM_X64_DR_DR3] = rdr3(); 2101 cpudata->drs[NVMM_X64_DR_DR6] = rdr6(); 2102 2103 x86_dbregs_restore(curlwp); 2104 } 2105 2106 static void 2107 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu) 2108 { 2109 struct vmx_cpudata *cpudata = vcpu->cpudata; 2110 2111 /* This gets restored automatically by the CPU. 
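 * On every VMEXIT the processor reloads these host-state fields
 * straight from the VMCS, so they must match what the host kernel
 * currently uses; they are refreshed here because they can differ
 * from one host CPU to another. KERNELGSBASE is not part of the
 * VMCS host-state area, so it is saved by hand below and restored
 * in vmx_vcpu_guest_misc_leave().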
*/ 2112 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)curcpu()->ci_idtvec.iv_idt); 2113 vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE)); 2114 vmx_vmwrite(VMCS_HOST_CR3, rcr3()); 2115 vmx_vmwrite(VMCS_HOST_CR4, rcr4()); 2116 2117 cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE); 2118 } 2119 2120 static void 2121 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu) 2122 { 2123 struct vmx_cpudata *cpudata = vcpu->cpudata; 2124 2125 wrmsr(MSR_STAR, cpudata->star); 2126 wrmsr(MSR_LSTAR, cpudata->lstar); 2127 wrmsr(MSR_CSTAR, cpudata->cstar); 2128 wrmsr(MSR_SFMASK, cpudata->sfmask); 2129 wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase); 2130 } 2131 2132 /* -------------------------------------------------------------------------- */ 2133 2134 #define VMX_INVVPID_ADDRESS 0 2135 #define VMX_INVVPID_CONTEXT 1 2136 #define VMX_INVVPID_ALL 2 2137 #define VMX_INVVPID_CONTEXT_NOGLOBAL 3 2138 2139 #define VMX_INVEPT_CONTEXT 1 2140 #define VMX_INVEPT_ALL 2 2141 2142 static inline void 2143 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu) 2144 { 2145 struct vmx_cpudata *cpudata = vcpu->cpudata; 2146 2147 if (vcpu->hcpu_last != hcpu) { 2148 cpudata->gtlb_want_flush = true; 2149 } 2150 } 2151 2152 static inline void 2153 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu) 2154 { 2155 struct vmx_cpudata *cpudata = vcpu->cpudata; 2156 struct ept_desc ept_desc; 2157 2158 if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) { 2159 return; 2160 } 2161 2162 ept_desc.eptp = vmx_vmread(VMCS_EPTP); 2163 ept_desc.mbz = 0; 2164 vmx_invept(vmx_ept_flush_op, &ept_desc); 2165 kcpuset_clear(cpudata->htlb_want_flush, hcpu); 2166 } 2167 2168 static inline uint64_t 2169 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata) 2170 { 2171 struct ept_desc ept_desc; 2172 uint64_t machgen; 2173 2174 machgen = machdata->mach_htlb_gen; 2175 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) { 2176 return machgen; 2177 } 2178 2179 kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running); 2180 2181 ept_desc.eptp = vmx_vmread(VMCS_EPTP); 2182 ept_desc.mbz = 0; 2183 vmx_invept(vmx_ept_flush_op, &ept_desc); 2184 2185 return machgen; 2186 } 2187 2188 static inline void 2189 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen) 2190 { 2191 cpudata->vcpu_htlb_gen = machgen; 2192 kcpuset_clear(cpudata->htlb_want_flush, cpu_number()); 2193 } 2194 2195 static inline void 2196 vmx_exit_evt(struct vmx_cpudata *cpudata) 2197 { 2198 uint64_t info, err, inslen; 2199 2200 cpudata->evt_pending = false; 2201 2202 info = vmx_vmread(VMCS_IDT_VECTORING_INFO); 2203 if (__predict_true((info & INTR_INFO_VALID) == 0)) { 2204 return; 2205 } 2206 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR); 2207 2208 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info); 2209 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err); 2210 2211 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) { 2212 case INTR_TYPE_SW_INT: 2213 case INTR_TYPE_PRIV_SW_EXC: 2214 case INTR_TYPE_SW_EXC: 2215 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH); 2216 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen); 2217 } 2218 2219 cpudata->evt_pending = true; 2220 } 2221 2222 static int 2223 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 2224 struct nvmm_vcpu_exit *exit) 2225 { 2226 struct nvmm_comm_page *comm = vcpu->comm; 2227 struct vmx_machdata *machdata = mach->machdata; 2228 struct vmx_cpudata *cpudata = vcpu->cpudata; 2229 struct vpid_desc vpid_desc; 2230 struct cpu_info *ci; 2231 uint64_t exitcode; 2232 uint64_t intstate; 2233 uint64_t machgen; 2234 int hcpu, ret; 2235 
bool launched; 2236 2237 vmx_vmcs_enter(vcpu); 2238 2239 vmx_vcpu_state_commit(vcpu); 2240 comm->state_cached = 0; 2241 2242 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) { 2243 vmx_vmcs_leave(vcpu); 2244 return EINVAL; 2245 } 2246 2247 ci = curcpu(); 2248 hcpu = cpu_number(); 2249 launched = cpudata->vmcs_launched; 2250 2251 vmx_gtlb_catchup(vcpu, hcpu); 2252 vmx_htlb_catchup(vcpu, hcpu); 2253 2254 if (vcpu->hcpu_last != hcpu) { 2255 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel); 2256 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss); 2257 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt); 2258 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE)); 2259 cpudata->gtsc_want_update = true; 2260 vcpu->hcpu_last = hcpu; 2261 } 2262 2263 vmx_vcpu_guest_dbregs_enter(vcpu); 2264 vmx_vcpu_guest_misc_enter(vcpu); 2265 2266 while (1) { 2267 if (cpudata->gtlb_want_flush) { 2268 vpid_desc.vpid = cpudata->asid; 2269 vpid_desc.addr = 0; 2270 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc); 2271 cpudata->gtlb_want_flush = false; 2272 } 2273 2274 if (__predict_false(cpudata->gtsc_want_update)) { 2275 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc()); 2276 cpudata->gtsc_want_update = false; 2277 } 2278 2279 vmx_vcpu_guest_fpu_enter(vcpu); 2280 vmx_cli(); 2281 machgen = vmx_htlb_flush(machdata, cpudata); 2282 lcr2(cpudata->gcr2); 2283 if (launched) { 2284 ret = vmx_vmresume(cpudata->gprs); 2285 } else { 2286 ret = vmx_vmlaunch(cpudata->gprs); 2287 } 2288 cpudata->gcr2 = rcr2(); 2289 vmx_htlb_flush_ack(cpudata, machgen); 2290 vmx_sti(); 2291 vmx_vcpu_guest_fpu_leave(vcpu); 2292 2293 if (__predict_false(ret != 0)) { 2294 vmx_exit_invalid(exit, -1); 2295 break; 2296 } 2297 vmx_exit_evt(cpudata); 2298 2299 launched = true; 2300 2301 exitcode = vmx_vmread(VMCS_EXIT_REASON); 2302 exitcode &= __BITS(15,0); 2303 2304 switch (exitcode) { 2305 case VMCS_EXITCODE_EXC_NMI: 2306 vmx_exit_exc_nmi(mach, vcpu, exit); 2307 break; 2308 case VMCS_EXITCODE_EXT_INT: 2309 exit->reason = NVMM_VCPU_EXIT_NONE; 2310 break; 2311 case VMCS_EXITCODE_CPUID: 2312 vmx_exit_cpuid(mach, vcpu, exit); 2313 break; 2314 case VMCS_EXITCODE_HLT: 2315 vmx_exit_hlt(mach, vcpu, exit); 2316 break; 2317 case VMCS_EXITCODE_CR: 2318 vmx_exit_cr(mach, vcpu, exit); 2319 break; 2320 case VMCS_EXITCODE_IO: 2321 vmx_exit_io(mach, vcpu, exit); 2322 break; 2323 case VMCS_EXITCODE_RDMSR: 2324 vmx_exit_rdmsr(mach, vcpu, exit); 2325 break; 2326 case VMCS_EXITCODE_WRMSR: 2327 vmx_exit_wrmsr(mach, vcpu, exit); 2328 break; 2329 case VMCS_EXITCODE_SHUTDOWN: 2330 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN; 2331 break; 2332 case VMCS_EXITCODE_MONITOR: 2333 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR); 2334 break; 2335 case VMCS_EXITCODE_MWAIT: 2336 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT); 2337 break; 2338 case VMCS_EXITCODE_XSETBV: 2339 vmx_exit_xsetbv(mach, vcpu, exit); 2340 break; 2341 case VMCS_EXITCODE_RDPMC: 2342 case VMCS_EXITCODE_RDTSCP: 2343 case VMCS_EXITCODE_INVVPID: 2344 case VMCS_EXITCODE_INVEPT: 2345 case VMCS_EXITCODE_VMCALL: 2346 case VMCS_EXITCODE_VMCLEAR: 2347 case VMCS_EXITCODE_VMLAUNCH: 2348 case VMCS_EXITCODE_VMPTRLD: 2349 case VMCS_EXITCODE_VMPTRST: 2350 case VMCS_EXITCODE_VMREAD: 2351 case VMCS_EXITCODE_VMRESUME: 2352 case VMCS_EXITCODE_VMWRITE: 2353 case VMCS_EXITCODE_VMXOFF: 2354 case VMCS_EXITCODE_VMXON: 2355 vmx_inject_ud(vcpu); 2356 exit->reason = NVMM_VCPU_EXIT_NONE; 2357 break; 2358 case VMCS_EXITCODE_EPT_VIOLATION: 2359 vmx_exit_epf(mach, vcpu, exit); 2360 break; 2361 case VMCS_EXITCODE_INT_WINDOW: 2362 
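/*
 * The interrupt window is open: the guest is able to take
 * interrupts again. Stop interrupt-window exiting, and report
 * INT_READY so that the emulator can inject its pending event.
 */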
vmx_event_waitexit_disable(vcpu, false); 2363 exit->reason = NVMM_VCPU_EXIT_INT_READY; 2364 break; 2365 case VMCS_EXITCODE_NMI_WINDOW: 2366 vmx_event_waitexit_disable(vcpu, true); 2367 exit->reason = NVMM_VCPU_EXIT_NMI_READY; 2368 break; 2369 default: 2370 vmx_exit_invalid(exit, exitcode); 2371 break; 2372 } 2373 2374 /* If no reason to return to userland, keep rolling. */ 2375 if (nvmm_return_needed(vcpu, exit)) { 2376 break; 2377 } 2378 if (exit->reason != NVMM_VCPU_EXIT_NONE) { 2379 break; 2380 } 2381 } 2382 2383 cpudata->vmcs_launched = launched; 2384 2385 cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc(); 2386 2387 vmx_vcpu_guest_misc_leave(vcpu); 2388 vmx_vcpu_guest_dbregs_leave(vcpu); 2389 2390 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS); 2391 exit->exitstate.cr8 = cpudata->gcr8; 2392 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY); 2393 exit->exitstate.int_shadow = 2394 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0; 2395 exit->exitstate.int_window_exiting = cpudata->int_window_exit; 2396 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit; 2397 exit->exitstate.evt_pending = cpudata->evt_pending; 2398 2399 vmx_vmcs_leave(vcpu); 2400 2401 return 0; 2402 } 2403 2404 /* -------------------------------------------------------------------------- */ 2405 2406 static int 2407 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages) 2408 { 2409 struct pglist pglist; 2410 paddr_t _pa; 2411 vaddr_t _va; 2412 size_t i; 2413 int ret; 2414 2415 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0, 2416 &pglist, 1, 0); 2417 if (ret != 0) 2418 return ENOMEM; 2419 _pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist)); 2420 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0, 2421 UVM_KMF_VAONLY | UVM_KMF_NOWAIT); 2422 if (_va == 0) 2423 goto error; 2424 2425 for (i = 0; i < npages; i++) { 2426 pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE, 2427 VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK); 2428 } 2429 pmap_update(pmap_kernel()); 2430 2431 memset((void *)_va, 0, npages * PAGE_SIZE); 2432 2433 *pa = _pa; 2434 *va = _va; 2435 return 0; 2436 2437 error: 2438 for (i = 0; i < npages; i++) { 2439 uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE)); 2440 } 2441 return ENOMEM; 2442 } 2443 2444 static void 2445 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages) 2446 { 2447 size_t i; 2448 2449 pmap_kremove(va, npages * PAGE_SIZE); 2450 pmap_update(pmap_kernel()); 2451 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY); 2452 for (i = 0; i < npages; i++) { 2453 uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE)); 2454 } 2455 } 2456 2457 /* -------------------------------------------------------------------------- */ 2458 2459 static void 2460 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write) 2461 { 2462 uint64_t byte; 2463 uint8_t bitoff; 2464 2465 if (msr < 0x00002000) { 2466 /* Range 1 */ 2467 byte = ((msr - 0x00000000) / 8) + 0; 2468 } else if (msr >= 0xC0000000 && msr < 0xC0002000) { 2469 /* Range 2 */ 2470 byte = ((msr - 0xC0000000) / 8) + 1024; 2471 } else { 2472 panic("%s: wrong range", __func__); 2473 } 2474 2475 bitoff = (msr & 0x7); 2476 2477 if (read) { 2478 bitmap[byte] &= ~__BIT(bitoff); 2479 } 2480 if (write) { 2481 bitmap[2048 + byte] &= ~__BIT(bitoff); 2482 } 2483 } 2484 2485 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0) 2486 #define VMX_SEG_ATTRIB_S __BIT(4) 2487 #define VMX_SEG_ATTRIB_DPL __BITS(6,5) 2488 #define VMX_SEG_ATTRIB_P __BIT(7) 2489 #define VMX_SEG_ATTRIB_AVL __BIT(12) 2490 #define VMX_SEG_ATTRIB_L __BIT(13) 2491 
#define VMX_SEG_ATTRIB_DEF __BIT(14) 2492 #define VMX_SEG_ATTRIB_G __BIT(15) 2493 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16) 2494 2495 static void 2496 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx) 2497 { 2498 uint64_t attrib; 2499 2500 attrib = 2501 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) | 2502 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) | 2503 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) | 2504 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) | 2505 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) | 2506 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) | 2507 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) | 2508 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) | 2509 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0); 2510 2511 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) { 2512 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector); 2513 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib); 2514 } 2515 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit); 2516 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base); 2517 } 2518 2519 static void 2520 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx) 2521 { 2522 uint64_t selector = 0, attrib = 0, base, limit; 2523 2524 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) { 2525 selector = vmx_vmread(vmx_guest_segs[idx].selector); 2526 attrib = vmx_vmread(vmx_guest_segs[idx].attrib); 2527 } 2528 limit = vmx_vmread(vmx_guest_segs[idx].limit); 2529 base = vmx_vmread(vmx_guest_segs[idx].base); 2530 2531 segs[idx].selector = selector; 2532 segs[idx].limit = limit; 2533 segs[idx].base = base; 2534 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE); 2535 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S); 2536 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL); 2537 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P); 2538 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL); 2539 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L); 2540 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF); 2541 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G); 2542 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) { 2543 segs[idx].attrib.p = 0; 2544 } 2545 } 2546 2547 static inline bool 2548 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags) 2549 { 2550 uint64_t cr0, cr3, cr4, efer; 2551 2552 if (flags & NVMM_X64_STATE_CRS) { 2553 cr0 = vmx_vmread(VMCS_GUEST_CR0); 2554 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) { 2555 return true; 2556 } 2557 cr3 = vmx_vmread(VMCS_GUEST_CR3); 2558 if (cr3 != state->crs[NVMM_X64_CR_CR3]) { 2559 return true; 2560 } 2561 cr4 = vmx_vmread(VMCS_GUEST_CR4); 2562 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) { 2563 return true; 2564 } 2565 } 2566 2567 if (flags & NVMM_X64_STATE_MSRS) { 2568 efer = vmx_vmread(VMCS_GUEST_IA32_EFER); 2569 if ((efer ^ 2570 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) { 2571 return true; 2572 } 2573 } 2574 2575 return false; 2576 } 2577 2578 static void 2579 vmx_vcpu_setstate(struct nvmm_cpu *vcpu) 2580 { 2581 struct nvmm_comm_page *comm = vcpu->comm; 2582 const struct nvmm_x64_state *state = &comm->state; 2583 struct vmx_cpudata *cpudata = vcpu->cpudata; 2584 struct fxsave *fpustate; 2585 uint64_t ctls1, intstate; 2586 uint64_t flags; 2587 2588 flags = comm->state_wanted; 2589 2590 vmx_vmcs_enter(vcpu); 2591 2592 if (vmx_state_tlb_flush(state, flags)) { 2593 cpudata->gtlb_want_flush = true; 2594 } 2595 2596 if 
(flags & NVMM_X64_STATE_SEGS) { 2597 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS); 2598 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS); 2599 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES); 2600 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS); 2601 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS); 2602 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS); 2603 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT); 2604 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT); 2605 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT); 2606 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR); 2607 } 2608 2609 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs)); 2610 if (flags & NVMM_X64_STATE_GPRS) { 2611 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs)); 2612 2613 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]); 2614 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]); 2615 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]); 2616 } 2617 2618 if (flags & NVMM_X64_STATE_CRS) { 2619 /* 2620 * CR0_ET must be 1 both in the shadow and the real register. 2621 * CR0_NE must be 1 in the real register. 2622 * CR0_NW and CR0_CD must be 0 in the real register. 2623 */ 2624 vmx_vmwrite(VMCS_CR0_SHADOW, 2625 (state->crs[NVMM_X64_CR_CR0] & CR0_STATIC_MASK) | 2626 CR0_ET); 2627 vmx_vmwrite(VMCS_GUEST_CR0, 2628 (state->crs[NVMM_X64_CR_CR0] & ~CR0_STATIC_MASK) | 2629 CR0_ET | CR0_NE); 2630 2631 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2]; 2632 2633 /* XXX We are not handling PDPTE here. */ 2634 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); 2635 2636 /* CR4_VMXE is mandatory. */ 2637 vmx_vmwrite(VMCS_GUEST_CR4, 2638 (state->crs[NVMM_X64_CR_CR4] & CR4_VALID) | CR4_VMXE); 2639 2640 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8]; 2641 2642 if (vmx_xcr0_mask != 0) { 2643 /* Clear illegal XCR0 bits, set mandatory X87 bit. */ 2644 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0]; 2645 cpudata->gxcr0 &= vmx_xcr0_mask; 2646 cpudata->gxcr0 |= XCR0_X87; 2647 } 2648 } 2649 2650 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs)); 2651 if (flags & NVMM_X64_STATE_DRS) { 2652 memcpy(cpudata->drs, state->drs, sizeof(state->drs)); 2653 2654 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF; 2655 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]); 2656 } 2657 2658 if (flags & NVMM_X64_STATE_MSRS) { 2659 cpudata->gmsr[VMX_MSRLIST_STAR].val = 2660 state->msrs[NVMM_X64_MSR_STAR]; 2661 cpudata->gmsr[VMX_MSRLIST_LSTAR].val = 2662 state->msrs[NVMM_X64_MSR_LSTAR]; 2663 cpudata->gmsr[VMX_MSRLIST_CSTAR].val = 2664 state->msrs[NVMM_X64_MSR_CSTAR]; 2665 cpudata->gmsr[VMX_MSRLIST_SFMASK].val = 2666 state->msrs[NVMM_X64_MSR_SFMASK]; 2667 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val = 2668 state->msrs[NVMM_X64_MSR_KERNELGSBASE]; 2669 2670 vmx_vmwrite(VMCS_GUEST_IA32_EFER, 2671 state->msrs[NVMM_X64_MSR_EFER]); 2672 vmx_vmwrite(VMCS_GUEST_IA32_PAT, 2673 state->msrs[NVMM_X64_MSR_PAT]); 2674 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS, 2675 state->msrs[NVMM_X64_MSR_SYSENTER_CS]); 2676 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP, 2677 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]); 2678 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP, 2679 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]); 2680 2681 cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC]; 2682 cpudata->gtsc_want_update = true; 2683 2684 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. 
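 * VM entry verifies that the "IA-32e mode guest" entry control is
 * consistent with the guest EFER.LMA (and with CR0.PG/EFER.LME);
 * an inconsistent pair makes the next VMLAUNCH/VMRESUME fail, so
 * recompute the control whenever userland sets EFER.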
*/ 2685 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS); 2686 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) { 2687 ctls1 |= ENTRY_CTLS_LONG_MODE; 2688 } else { 2689 ctls1 &= ~ENTRY_CTLS_LONG_MODE; 2690 } 2691 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1); 2692 } 2693 2694 if (flags & NVMM_X64_STATE_INTR) { 2695 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY); 2696 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS); 2697 if (state->intr.int_shadow) { 2698 intstate |= INT_STATE_MOVSS; 2699 } 2700 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate); 2701 2702 if (state->intr.int_window_exiting) { 2703 vmx_event_waitexit_enable(vcpu, false); 2704 } else { 2705 vmx_event_waitexit_disable(vcpu, false); 2706 } 2707 2708 if (state->intr.nmi_window_exiting) { 2709 vmx_event_waitexit_enable(vcpu, true); 2710 } else { 2711 vmx_event_waitexit_disable(vcpu, true); 2712 } 2713 } 2714 2715 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu)); 2716 if (flags & NVMM_X64_STATE_FPU) { 2717 memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu, 2718 sizeof(state->fpu)); 2719 2720 fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave; 2721 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask; 2722 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask; 2723 2724 if (vmx_xcr0_mask != 0) { 2725 /* Reset XSTATE_BV, to force a reload. */ 2726 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask; 2727 } 2728 } 2729 2730 vmx_vmcs_leave(vcpu); 2731 2732 comm->state_wanted = 0; 2733 comm->state_cached |= flags; 2734 } 2735 2736 static void 2737 vmx_vcpu_getstate(struct nvmm_cpu *vcpu) 2738 { 2739 struct nvmm_comm_page *comm = vcpu->comm; 2740 struct nvmm_x64_state *state = &comm->state; 2741 struct vmx_cpudata *cpudata = vcpu->cpudata; 2742 uint64_t intstate, flags; 2743 2744 flags = comm->state_wanted; 2745 2746 vmx_vmcs_enter(vcpu); 2747 2748 if (flags & NVMM_X64_STATE_SEGS) { 2749 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS); 2750 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS); 2751 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES); 2752 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS); 2753 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS); 2754 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS); 2755 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT); 2756 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT); 2757 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT); 2758 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR); 2759 } 2760 2761 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs)); 2762 if (flags & NVMM_X64_STATE_GPRS) { 2763 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs)); 2764 2765 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP); 2766 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP); 2767 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS); 2768 } 2769 2770 if (flags & NVMM_X64_STATE_CRS) { 2771 state->crs[NVMM_X64_CR_CR0] = 2772 (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) | 2773 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK); 2774 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2; 2775 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3); 2776 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4); 2777 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8; 2778 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0; 2779 2780 /* Hide VMXE. 
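 * CR4.VMXE is forced to 1 in the real guest CR4 for as long as the
 * vCPU runs under VMX (see vmx_vcpu_setstate() and the CR4 mask in
 * vmx_vcpu_init()), but the guest is not supposed to see it, so
 * filter it out of the state reported to userland.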
*/ 2781 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE; 2782 } 2783 2784 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs)); 2785 if (flags & NVMM_X64_STATE_DRS) { 2786 memcpy(state->drs, cpudata->drs, sizeof(state->drs)); 2787 2788 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7); 2789 } 2790 2791 if (flags & NVMM_X64_STATE_MSRS) { 2792 state->msrs[NVMM_X64_MSR_STAR] = 2793 cpudata->gmsr[VMX_MSRLIST_STAR].val; 2794 state->msrs[NVMM_X64_MSR_LSTAR] = 2795 cpudata->gmsr[VMX_MSRLIST_LSTAR].val; 2796 state->msrs[NVMM_X64_MSR_CSTAR] = 2797 cpudata->gmsr[VMX_MSRLIST_CSTAR].val; 2798 state->msrs[NVMM_X64_MSR_SFMASK] = 2799 cpudata->gmsr[VMX_MSRLIST_SFMASK].val; 2800 state->msrs[NVMM_X64_MSR_KERNELGSBASE] = 2801 cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val; 2802 state->msrs[NVMM_X64_MSR_EFER] = 2803 vmx_vmread(VMCS_GUEST_IA32_EFER); 2804 state->msrs[NVMM_X64_MSR_PAT] = 2805 vmx_vmread(VMCS_GUEST_IA32_PAT); 2806 state->msrs[NVMM_X64_MSR_SYSENTER_CS] = 2807 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS); 2808 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = 2809 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP); 2810 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = 2811 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP); 2812 state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc; 2813 } 2814 2815 if (flags & NVMM_X64_STATE_INTR) { 2816 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY); 2817 state->intr.int_shadow = 2818 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0; 2819 state->intr.int_window_exiting = cpudata->int_window_exit; 2820 state->intr.nmi_window_exiting = cpudata->nmi_window_exit; 2821 state->intr.evt_pending = cpudata->evt_pending; 2822 } 2823 2824 CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu)); 2825 if (flags & NVMM_X64_STATE_FPU) { 2826 memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave, 2827 sizeof(state->fpu)); 2828 } 2829 2830 vmx_vmcs_leave(vcpu); 2831 2832 comm->state_wanted = 0; 2833 comm->state_cached |= flags; 2834 } 2835 2836 static void 2837 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags) 2838 { 2839 vcpu->comm->state_wanted = flags; 2840 vmx_vcpu_getstate(vcpu); 2841 } 2842 2843 static void 2844 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu) 2845 { 2846 vcpu->comm->state_wanted = vcpu->comm->state_commit; 2847 vcpu->comm->state_commit = 0; 2848 vmx_vcpu_setstate(vcpu); 2849 } 2850 2851 /* -------------------------------------------------------------------------- */ 2852 2853 static void 2854 vmx_asid_alloc(struct nvmm_cpu *vcpu) 2855 { 2856 struct vmx_cpudata *cpudata = vcpu->cpudata; 2857 size_t i, oct, bit; 2858 2859 mutex_enter(&vmx_asidlock); 2860 2861 for (i = 0; i < vmx_maxasid; i++) { 2862 oct = i / 8; 2863 bit = i % 8; 2864 2865 if (vmx_asidmap[oct] & __BIT(bit)) { 2866 continue; 2867 } 2868 2869 cpudata->asid = i; 2870 2871 vmx_asidmap[oct] |= __BIT(bit); 2872 vmx_vmwrite(VMCS_VPID, i); 2873 mutex_exit(&vmx_asidlock); 2874 return; 2875 } 2876 2877 mutex_exit(&vmx_asidlock); 2878 2879 panic("%s: impossible", __func__); 2880 } 2881 2882 static void 2883 vmx_asid_free(struct nvmm_cpu *vcpu) 2884 { 2885 size_t oct, bit; 2886 uint64_t asid; 2887 2888 asid = vmx_vmread(VMCS_VPID); 2889 2890 oct = asid / 8; 2891 bit = asid % 8; 2892 2893 mutex_enter(&vmx_asidlock); 2894 vmx_asidmap[oct] &= ~__BIT(bit); 2895 mutex_exit(&vmx_asidlock); 2896 } 2897 2898 static void 2899 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 2900 { 2901 struct vmx_cpudata *cpudata = vcpu->cpudata; 2902 struct vmcs *vmcs = cpudata->vmcs; 2903 struct msr_entry *gmsr = cpudata->gmsr; 2904 extern uint8_t 
vmx_resume_rip; 2905 uint64_t rev, eptp; 2906 2907 rev = vmx_get_revision(); 2908 2909 memset(vmcs, 0, VMCS_SIZE); 2910 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION); 2911 vmcs->abort = 0; 2912 2913 vmx_vmcs_enter(vcpu); 2914 2915 /* No link pointer. */ 2916 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF); 2917 2918 /* Install the CTLSs. */ 2919 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls); 2920 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls); 2921 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2); 2922 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls); 2923 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls); 2924 2925 /* Allow direct access to certain MSRs. */ 2926 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE); 2927 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true); 2928 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true); 2929 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true); 2930 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true); 2931 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true); 2932 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true); 2933 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true); 2934 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true); 2935 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true); 2936 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true); 2937 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true); 2938 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false); 2939 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa); 2940 2941 /* 2942 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This 2943 * includes the L1D_FLUSH MSR, to mitigate L1TF. 2944 */ 2945 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR; 2946 gmsr[VMX_MSRLIST_STAR].val = 0; 2947 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR; 2948 gmsr[VMX_MSRLIST_LSTAR].val = 0; 2949 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR; 2950 gmsr[VMX_MSRLIST_CSTAR].val = 0; 2951 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK; 2952 gmsr[VMX_MSRLIST_SFMASK].val = 0; 2953 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE; 2954 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0; 2955 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD; 2956 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH; 2957 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa); 2958 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa); 2959 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr); 2960 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR); 2961 2962 /* Set the CR0 mask. Any change of these bits causes a VMEXIT. */ 2963 vmx_vmwrite(VMCS_CR0_MASK, CR0_STATIC_MASK); 2964 2965 /* Force unsupported CR4 fields to zero. */ 2966 vmx_vmwrite(VMCS_CR4_MASK, CR4_INVALID); 2967 vmx_vmwrite(VMCS_CR4_SHADOW, 0); 2968 2969 /* Set the Host state for resuming. 
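 * These fields are loaded by the CPU on each VMEXIT. In particular,
 * VMCS_HOST_RIP points at the vmx_resume_rip label in the assembly
 * stub, so host execution resumes right after the VMLAUNCH/VMRESUME
 * instruction, where the host GPRs get restored.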
*/ 2970 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip); 2971 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL)); 2972 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL)); 2973 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL)); 2974 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL)); 2975 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0); 2976 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0); 2977 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0); 2978 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0); 2979 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0); 2980 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT)); 2981 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER)); 2982 vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS); 2983 2984 /* Generate ASID. */ 2985 vmx_asid_alloc(vcpu); 2986 2987 /* Enable Extended Paging, 4-Level. */ 2988 eptp = 2989 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) | 2990 __SHIFTIN(4-1, EPTP_WALKLEN) | 2991 (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) | 2992 mach->vm->vm_map.pmap->pm_pdirpa[0]; 2993 vmx_vmwrite(VMCS_EPTP, eptp); 2994 2995 /* Init IA32_MISC_ENABLE. */ 2996 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE); 2997 cpudata->gmsr_misc_enable &= 2998 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN); 2999 cpudata->gmsr_misc_enable |= 3000 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL); 3001 3002 /* Init XSAVE header. */ 3003 cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask; 3004 cpudata->gfpu.xsh_xcomp_bv = 0; 3005 3006 /* These MSRs are static. */ 3007 cpudata->star = rdmsr(MSR_STAR); 3008 cpudata->lstar = rdmsr(MSR_LSTAR); 3009 cpudata->cstar = rdmsr(MSR_CSTAR); 3010 cpudata->sfmask = rdmsr(MSR_SFMASK); 3011 3012 /* Install the RESET state. */ 3013 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state, 3014 sizeof(nvmm_x86_reset_state)); 3015 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL; 3016 vcpu->comm->state_cached = 0; 3017 vmx_vcpu_setstate(vcpu); 3018 3019 vmx_vmcs_leave(vcpu); 3020 } 3021 3022 static int 3023 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 3024 { 3025 struct vmx_cpudata *cpudata; 3026 int error; 3027 3028 /* Allocate the VMX cpudata. */ 3029 cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map, 3030 roundup(sizeof(*cpudata), PAGE_SIZE), 0, 3031 UVM_KMF_WIRED|UVM_KMF_ZERO); 3032 vcpu->cpudata = cpudata; 3033 3034 /* VMCS */ 3035 error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs, 3036 VMCS_NPAGES); 3037 if (error) 3038 goto error; 3039 3040 /* MSR Bitmap */ 3041 error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm, 3042 MSRBM_NPAGES); 3043 if (error) 3044 goto error; 3045 3046 /* Guest MSR List */ 3047 error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1); 3048 if (error) 3049 goto error; 3050 3051 kcpuset_create(&cpudata->htlb_want_flush, true); 3052 3053 /* Init the VCPU info. 
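 * This installs the VMCS controls, the MSR bitmap, the guest MSR
 * list, the EPTP, and the architectural RESET state of the vCPU.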
*/ 3054 vmx_vcpu_init(mach, vcpu); 3055 3056 return 0; 3057 3058 error: 3059 if (cpudata->vmcs_pa) { 3060 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, 3061 VMCS_NPAGES); 3062 } 3063 if (cpudata->msrbm_pa) { 3064 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, 3065 MSRBM_NPAGES); 3066 } 3067 if (cpudata->gmsr_pa) { 3068 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1); 3069 } 3070 3071 uvm_km_free(kernel_map, (vaddr_t)cpudata, roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED); 3072 return error; 3073 } 3074 3075 static void 3076 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 3077 { 3078 struct vmx_cpudata *cpudata = vcpu->cpudata; 3079 3080 vmx_vmcs_enter(vcpu); 3081 vmx_asid_free(vcpu); 3082 vmx_vmcs_destroy(vcpu); 3083 3084 kcpuset_destroy(cpudata->htlb_want_flush); 3085 3086 vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES); 3087 vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES); 3088 vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1); 3089 uvm_km_free(kernel_map, (vaddr_t)cpudata, 3090 roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED); 3091 } 3092 3093 /* -------------------------------------------------------------------------- */ 3094 3095 static int 3096 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data) 3097 { 3098 struct nvmm_vcpu_conf_cpuid *cpuid = data; 3099 size_t i; 3100 3101 if (__predict_false(cpuid->mask && cpuid->exit)) { 3102 return EINVAL; 3103 } 3104 if (__predict_false(cpuid->mask && 3105 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) || 3106 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) || 3107 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) || 3108 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) { 3109 return EINVAL; 3110 } 3111 3112 /* If unset, delete, to restore the default behavior. */ 3113 if (!cpuid->mask && !cpuid->exit) { 3114 for (i = 0; i < VMX_NCPUIDS; i++) { 3115 if (!cpudata->cpuidpresent[i]) { 3116 continue; 3117 } 3118 if (cpudata->cpuid[i].leaf == cpuid->leaf) { 3119 cpudata->cpuidpresent[i] = false; 3120 } 3121 } 3122 return 0; 3123 } 3124 3125 /* If already here, replace. */ 3126 for (i = 0; i < VMX_NCPUIDS; i++) { 3127 if (!cpudata->cpuidpresent[i]) { 3128 continue; 3129 } 3130 if (cpudata->cpuid[i].leaf == cpuid->leaf) { 3131 memcpy(&cpudata->cpuid[i], cpuid, 3132 sizeof(struct nvmm_vcpu_conf_cpuid)); 3133 return 0; 3134 } 3135 } 3136 3137 /* Not here, insert.
*/ 3138 for (i = 0; i < VMX_NCPUIDS; i++) { 3139 if (!cpudata->cpuidpresent[i]) { 3140 cpudata->cpuidpresent[i] = true; 3141 memcpy(&cpudata->cpuid[i], cpuid, 3142 sizeof(struct nvmm_vcpu_conf_cpuid)); 3143 return 0; 3144 } 3145 } 3146 3147 return ENOBUFS; 3148 } 3149 3150 static int 3151 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data) 3152 { 3153 struct nvmm_vcpu_conf_tpr *tpr = data; 3154 3155 memcpy(&cpudata->tpr, tpr, sizeof(*tpr)); 3156 return 0; 3157 } 3158 3159 static int 3160 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data) 3161 { 3162 struct vmx_cpudata *cpudata = vcpu->cpudata; 3163 3164 switch (op) { 3165 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID): 3166 return vmx_vcpu_configure_cpuid(cpudata, data); 3167 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR): 3168 return vmx_vcpu_configure_tpr(cpudata, data); 3169 default: 3170 return EINVAL; 3171 } 3172 } 3173 3174 static void 3175 vmx_vcpu_suspend(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 3176 { 3177 struct vmx_cpudata *cpudata = vcpu->cpudata; 3178 struct cpu_info *vmcs_ci; 3179 3180 KASSERT(cpudata->vmcs_refcnt == 0); 3181 3182 vmcs_ci = cpudata->vmcs_ci; 3183 cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */ 3184 3185 kpreempt_disable(); 3186 if (vmcs_ci == NULL) { 3187 /* VMCS is inactive, nothing to do. */ 3188 } else if (vmcs_ci != curcpu()) { 3189 /* VMCS is active on a remote CPU; clear it there. */ 3190 vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa); 3191 } else { 3192 /* VMCS is active on this CPU; clear it here. */ 3193 vmx_vmclear(&cpudata->vmcs_pa); 3194 } 3195 kpreempt_enable(); 3196 } 3197 3198 static void 3199 vmx_vcpu_resume(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 3200 { 3201 struct vmx_cpudata *cpudata = vcpu->cpudata; 3202 3203 KASSERT(cpudata->vmcs_refcnt == 0); 3204 3205 /* Mark VMCS as inactive. */ 3206 cpudata->vmcs_ci = NULL; 3207 } 3208 3209 /* -------------------------------------------------------------------------- */ 3210 3211 static void 3212 vmx_tlb_flush(struct pmap *pm) 3213 { 3214 struct nvmm_machine *mach = pm->pm_data; 3215 struct vmx_machdata *machdata = mach->machdata; 3216 3217 atomic_inc_64(&machdata->mach_htlb_gen); 3218 3219 /* Generates IPIs, which cause #VMEXITs. */ 3220 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM); 3221 } 3222 3223 static void 3224 vmx_machine_create(struct nvmm_machine *mach) 3225 { 3226 struct pmap *pmap = mach->vm->vm_map.pmap; 3227 struct vmx_machdata *machdata; 3228 3229 /* Convert to EPT. */ 3230 pmap_ept_transform(pmap); 3231 3232 /* Fill in pmap info. */ 3233 pmap->pm_data = (void *)mach; 3234 pmap->pm_tlb_flush = vmx_tlb_flush; 3235 3236 machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP); 3237 mach->machdata = machdata; 3238 3239 /* Start with an hTLB flush everywhere. 
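 * The machine generation starts at 1 while each vCPU starts with
 * vcpu_htlb_gen == 0 (the cpudata is zero-filled), so the first run
 * of every vCPU sees a stale generation and performs an INVEPT in
 * vmx_htlb_flush() before entering the guest.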
*/ 3240 machdata->mach_htlb_gen = 1; 3241 } 3242 3243 static void 3244 vmx_machine_destroy(struct nvmm_machine *mach) 3245 { 3246 struct vmx_machdata *machdata = mach->machdata; 3247 3248 kmem_free(machdata, sizeof(struct vmx_machdata)); 3249 } 3250 3251 static int 3252 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data) 3253 { 3254 panic("%s: impossible", __func__); 3255 } 3256 3257 /* -------------------------------------------------------------------------- */ 3258 3259 #define CTLS_ONE_ALLOWED(msrval, bitoff) \ 3260 ((msrval & __BIT(32 + bitoff)) != 0) 3261 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \ 3262 ((msrval & __BIT(bitoff)) == 0) 3263 3264 static int 3265 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one) 3266 { 3267 uint64_t basic, val, true_val; 3268 bool has_true; 3269 size_t i; 3270 3271 basic = rdmsr(MSR_IA32_VMX_BASIC); 3272 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0; 3273 3274 val = rdmsr(msr_ctls); 3275 if (has_true) { 3276 true_val = rdmsr(msr_true_ctls); 3277 } else { 3278 true_val = val; 3279 } 3280 3281 for (i = 0; i < 32; i++) { 3282 if (!(set_one & __BIT(i))) { 3283 continue; 3284 } 3285 if (!CTLS_ONE_ALLOWED(true_val, i)) { 3286 return -1; 3287 } 3288 } 3289 3290 return 0; 3291 } 3292 3293 static int 3294 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, 3295 uint64_t set_one, uint64_t set_zero, uint64_t *res) 3296 { 3297 uint64_t basic, val, true_val; 3298 bool one_allowed, zero_allowed, has_true; 3299 size_t i; 3300 3301 basic = rdmsr(MSR_IA32_VMX_BASIC); 3302 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0; 3303 3304 val = rdmsr(msr_ctls); 3305 if (has_true) { 3306 true_val = rdmsr(msr_true_ctls); 3307 } else { 3308 true_val = val; 3309 } 3310 3311 for (i = 0; i < 32; i++) { 3312 one_allowed = CTLS_ONE_ALLOWED(true_val, i); 3313 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i); 3314 3315 if (zero_allowed && !one_allowed) { 3316 if (set_one & __BIT(i)) 3317 return -1; 3318 *res &= ~__BIT(i); 3319 } else if (one_allowed && !zero_allowed) { 3320 if (set_zero & __BIT(i)) 3321 return -1; 3322 *res |= __BIT(i); 3323 } else { 3324 if (set_zero & __BIT(i)) { 3325 *res &= ~__BIT(i); 3326 } else if (set_one & __BIT(i)) { 3327 *res |= __BIT(i); 3328 } else if (!has_true) { 3329 *res &= ~__BIT(i); 3330 } else if (CTLS_ZERO_ALLOWED(val, i)) { 3331 *res &= ~__BIT(i); 3332 } else if (CTLS_ONE_ALLOWED(val, i)) { 3333 *res |= __BIT(i); 3334 } else { 3335 return -1; 3336 } 3337 } 3338 } 3339 3340 return 0; 3341 } 3342 3343 static bool 3344 vmx_ident(void) 3345 { 3346 uint64_t msr; 3347 int ret; 3348 3349 if (!(cpu_feature[1] & CPUID2_VMX)) { 3350 return false; 3351 } 3352 3353 msr = rdmsr(MSR_IA32_FEATURE_CONTROL); 3354 if ((msr & IA32_FEATURE_CONTROL_LOCK) != 0 && 3355 (msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) { 3356 printf("NVMM: VMX disabled in BIOS\n"); 3357 return false; 3358 } 3359 3360 msr = rdmsr(MSR_IA32_VMX_BASIC); 3361 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) { 3362 printf("NVMM: I/O reporting not supported\n"); 3363 return false; 3364 } 3365 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) { 3366 printf("NVMM: WB memory not supported\n"); 3367 return false; 3368 } 3369 3370 /* PG and PE are reported, even if Unrestricted Guests is supported. 
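 * The FIXED0/FIXED1 MSRs give the CR0 bits that are constrained
 * under VMX: a value is acceptable only if every bit set in FIXED0
 * is set in it, and every bit clear in FIXED1 is clear in it. With
 * Unrestricted Guest the CPU allows PG and PE to be 0 anyway, so
 * mask them out of the constraints.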
*/ 3371 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE); 3372 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE); 3373 ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1); 3374 if (ret == -1) { 3375 printf("NVMM: CR0 requirements not satisfied\n"); 3376 return false; 3377 } 3378 3379 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0); 3380 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1); 3381 ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1); 3382 if (ret == -1) { 3383 printf("NVMM: CR4 requirements not satisfied\n"); 3384 return false; 3385 } 3386 3387 /* Init the CTLSs right now, and check for errors. */ 3388 ret = vmx_init_ctls( 3389 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS, 3390 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO, 3391 &vmx_pinbased_ctls); 3392 if (ret == -1) { 3393 printf("NVMM: pin-based-ctls requirements not satisfied\n"); 3394 return false; 3395 } 3396 ret = vmx_init_ctls( 3397 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 3398 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO, 3399 &vmx_procbased_ctls); 3400 if (ret == -1) { 3401 printf("NVMM: proc-based-ctls requirements not satisfied\n"); 3402 return false; 3403 } 3404 ret = vmx_init_ctls( 3405 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2, 3406 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO, 3407 &vmx_procbased_ctls2); 3408 if (ret == -1) { 3409 printf("NVMM: proc-based-ctls2 requirements not satisfied\n"); 3410 return false; 3411 } 3412 ret = vmx_check_ctls( 3413 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2, 3414 PROC_CTLS2_INVPCID_ENABLE); 3415 if (ret != -1) { 3416 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE; 3417 } 3418 ret = vmx_init_ctls( 3419 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS, 3420 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO, 3421 &vmx_entry_ctls); 3422 if (ret == -1) { 3423 printf("NVMM: entry-ctls requirements not satisfied\n"); 3424 return false; 3425 } 3426 ret = vmx_init_ctls( 3427 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS, 3428 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO, 3429 &vmx_exit_ctls); 3430 if (ret == -1) { 3431 printf("NVMM: exit-ctls requirements not satisfied\n"); 3432 return false; 3433 } 3434 3435 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3436 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) { 3437 printf("NVMM: 4-level page tree not supported\n"); 3438 return false; 3439 } 3440 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) { 3441 printf("NVMM: INVEPT not supported\n"); 3442 return false; 3443 } 3444 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) { 3445 printf("NVMM: INVVPID not supported\n"); 3446 return false; 3447 } 3448 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) { 3449 pmap_ept_has_ad = true; 3450 } else { 3451 pmap_ept_has_ad = false; 3452 } 3453 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) { 3454 printf("NVMM: EPT UC/WB memory types not supported\n"); 3455 return false; 3456 } 3457 3458 return true; 3459 } 3460 3461 static void 3462 vmx_init_asid(uint32_t maxasid) 3463 { 3464 size_t allocsz; 3465 3466 mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE); 3467 3468 vmx_maxasid = maxasid; 3469 allocsz = roundup(maxasid, 8) / 8; 3470 vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP); 3471 3472 /* ASID 0 is reserved for the host. 
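 * VM entry fails if the "enable VPID" execution control is set
 * while the VPID field is zero, since VPID 0 designates the host
 * address space. The allocator must therefore never hand out
 * ASID 0 to a guest.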
*/ 3473 vmx_asidmap[0] |= __BIT(0); 3474 } 3475 3476 static void 3477 vmx_change_cpu(void *arg1, void *arg2) 3478 { 3479 struct cpu_info *ci = curcpu(); 3480 bool enable = arg1 != NULL; 3481 uint64_t msr, cr4; 3482 3483 if (enable) { 3484 msr = rdmsr(MSR_IA32_FEATURE_CONTROL); 3485 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) { 3486 /* Lock now, with VMX-outside-SMX enabled. */ 3487 wrmsr(MSR_IA32_FEATURE_CONTROL, msr | 3488 IA32_FEATURE_CONTROL_LOCK | 3489 IA32_FEATURE_CONTROL_OUT_SMX); 3490 } 3491 } 3492 3493 if (!enable) { 3494 vmx_vmxoff(); 3495 } 3496 3497 cr4 = rcr4(); 3498 if (enable) { 3499 cr4 |= CR4_VMXE; 3500 } else { 3501 cr4 &= ~CR4_VMXE; 3502 } 3503 lcr4(cr4); 3504 3505 if (enable) { 3506 vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa); 3507 } 3508 } 3509 3510 static void 3511 vmx_init_l1tf(void) 3512 { 3513 u_int descs[4]; 3514 uint64_t msr; 3515 3516 if (cpuid_level < 7) { 3517 return; 3518 } 3519 3520 x86_cpuid(7, descs); 3521 3522 if (descs[3] & CPUID_SEF_ARCH_CAP) { 3523 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 3524 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) { 3525 /* No mitigation needed. */ 3526 return; 3527 } 3528 } 3529 3530 if (descs[3] & CPUID_SEF_L1D_FLUSH) { 3531 /* Enable hardware mitigation. */ 3532 vmx_msrlist_entry_nmsr += 1; 3533 } 3534 } 3535 3536 static void 3537 vmx_suspend_interrupt(void) 3538 { 3539 3540 /* 3541 * Generates IPIs, which cause #VMEXITs. No other purpose for 3542 * the TLB business; the #VMEXIT triggered by IPI is the only 3543 * effect that matters here. 3544 */ 3545 pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM); 3546 } 3547 3548 static void 3549 vmx_suspend(void) 3550 { 3551 uint64_t xc; 3552 3553 xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL); 3554 xc_wait(xc); 3555 } 3556 3557 static void 3558 vmx_resume(void) 3559 { 3560 uint64_t xc; 3561 3562 xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL); 3563 xc_wait(xc); 3564 } 3565 3566 static void 3567 vmx_init(void) 3568 { 3569 CPU_INFO_ITERATOR cii; 3570 struct cpu_info *ci; 3571 uint64_t msr; 3572 struct vmxon *vmxon; 3573 uint32_t revision; 3574 u_int descs[4]; 3575 paddr_t pa; 3576 vaddr_t va; 3577 int error; 3578 3579 /* Init the ASID bitmap (VPID). */ 3580 vmx_init_asid(VPID_MAX); 3581 3582 /* Init the XCR0 mask. */ 3583 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features; 3584 3585 /* Init the max basic CPUID leaf. */ 3586 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC); 3587 3588 /* Init the max extended CPUID leaf. */ 3589 x86_cpuid(0x80000000, descs); 3590 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED); 3591 3592 /* Init the TLB flush op, the EPT flush op and the EPTP type. */ 3593 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3594 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) { 3595 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT; 3596 } else { 3597 vmx_tlb_flush_op = VMX_INVVPID_ALL; 3598 } 3599 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) { 3600 vmx_ept_flush_op = VMX_INVEPT_CONTEXT; 3601 } else { 3602 vmx_ept_flush_op = VMX_INVEPT_ALL; 3603 } 3604 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) { 3605 vmx_eptp_type = EPTP_TYPE_WB; 3606 } else { 3607 vmx_eptp_type = EPTP_TYPE_UC; 3608 } 3609 3610 /* Init the L1TF mitigation. 
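 * If IA32_ARCH_CAPABILITIES reports SKIP_L1DFL_VMENTRY, the CPU is
 * not vulnerable and nothing is done. Otherwise, if the L1D_FLUSH
 * feature is present, vmx_init_l1tf() bumps the VMENTRY MSR-load
 * count so that the IA32_FLUSH_CMD entry initialized in
 * vmx_vcpu_init() is written on each VM entry, flushing the L1D
 * cache.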
*/ 3611 vmx_init_l1tf(); 3612 3613 memset(vmxoncpu, 0, sizeof(vmxoncpu)); 3614 revision = vmx_get_revision(); 3615 3616 for (CPU_INFO_FOREACH(cii, ci)) { 3617 error = vmx_memalloc(&pa, &va, 1); 3618 if (error) { 3619 panic("%s: out of memory", __func__); 3620 } 3621 vmxoncpu[cpu_index(ci)].pa = pa; 3622 vmxoncpu[cpu_index(ci)].va = va; 3623 3624 vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va; 3625 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION); 3626 } 3627 3628 vmx_resume(); 3629 } 3630 3631 static void 3632 vmx_fini_asid(void) 3633 { 3634 size_t allocsz; 3635 3636 allocsz = roundup(vmx_maxasid, 8) / 8; 3637 kmem_free(vmx_asidmap, allocsz); 3638 3639 mutex_destroy(&vmx_asidlock); 3640 } 3641 3642 static void 3643 vmx_fini(void) 3644 { 3645 size_t i; 3646 3647 vmx_suspend(); 3648 3649 for (i = 0; i < MAXCPUS; i++) { 3650 if (vmxoncpu[i].pa != 0) 3651 vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1); 3652 } 3653 3654 vmx_fini_asid(); 3655 } 3656 3657 static void 3658 vmx_capability(struct nvmm_capability *cap) 3659 { 3660 cap->arch.mach_conf_support = 0; 3661 cap->arch.vcpu_conf_support = 3662 NVMM_CAP_ARCH_VCPU_CONF_CPUID | 3663 NVMM_CAP_ARCH_VCPU_CONF_TPR; 3664 cap->arch.xcr0_mask = vmx_xcr0_mask; 3665 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask; 3666 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS; 3667 } 3668 3669 const struct nvmm_impl nvmm_x86_vmx = { 3670 .name = "x86-vmx", 3671 .ident = vmx_ident, 3672 .init = vmx_init, 3673 .fini = vmx_fini, 3674 .suspend_interrupt = vmx_suspend_interrupt, 3675 .suspend = vmx_suspend, 3676 .resume = vmx_resume, 3677 .capability = vmx_capability, 3678 .mach_conf_max = NVMM_X86_MACH_NCONF, 3679 .mach_conf_sizes = NULL, 3680 .vcpu_conf_max = NVMM_X86_VCPU_NCONF, 3681 .vcpu_conf_sizes = vmx_vcpu_conf_sizes, 3682 .state_size = sizeof(struct nvmm_x64_state), 3683 .machine_create = vmx_machine_create, 3684 .machine_destroy = vmx_machine_destroy, 3685 .machine_configure = vmx_machine_configure, 3686 .vcpu_create = vmx_vcpu_create, 3687 .vcpu_destroy = vmx_vcpu_destroy, 3688 .vcpu_configure = vmx_vcpu_configure, 3689 .vcpu_setstate = vmx_vcpu_setstate, 3690 .vcpu_getstate = vmx_vcpu_getstate, 3691 .vcpu_inject = vmx_vcpu_inject, 3692 .vcpu_run = vmx_vcpu_run, 3693 .vcpu_suspend = vmx_vcpu_suspend, 3694 .vcpu_resume = vmx_vcpu_resume, 3695 }; 3696
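/*
 * Illustrative sketch, not part of the driver: a minimal userland
 * loop driving this backend through libnvmm. The entry points below
 * (nvmm_init, nvmm_machine_create, nvmm_vcpu_create, nvmm_vcpu_run)
 * are assumed from libnvmm; see nvmm(4) and libnvmm(3) for the
 * authoritative interface.
 *
 *	struct nvmm_machine mach;
 *	struct nvmm_vcpu vcpu;
 *
 *	if (nvmm_init() == -1)
 *		err(EXIT_FAILURE, "nvmm_init");
 *	if (nvmm_machine_create(&mach) == -1)
 *		err(EXIT_FAILURE, "nvmm_machine_create");
 *	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_create");
 *	for (;;) {
 *		if (nvmm_vcpu_run(&mach, &vcpu) == -1)
 *			err(EXIT_FAILURE, "nvmm_vcpu_run");
 *		switch (vcpu.exit->reason) {
 *		case NVMM_VCPU_EXIT_NONE:
 *			continue;
 *		case NVMM_VCPU_EXIT_IO:
 *			(emulate the access described in vcpu.exit->u.io)
 *			break;
 *		(handle the other exit reasons: MEMORY, RDMSR, ...)
 *		}
 *	}
 */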