      1 /*	$NetBSD: nvmm_x86_vmx.c,v 1.55 2020/05/09 08:39:07 maxv Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Maxime Villard.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.55 2020/05/09 08:39:07 maxv Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/kernel.h>
     38 #include <sys/kmem.h>
     39 #include <sys/cpu.h>
     40 #include <sys/xcall.h>
     41 #include <sys/mman.h>
     42 #include <sys/bitops.h>
     43 
     44 #include <uvm/uvm.h>
     45 #include <uvm/uvm_page.h>
     46 
     47 #include <x86/cputypes.h>
     48 #include <x86/specialreg.h>
     49 #include <x86/pmap.h>
     50 #include <x86/dbregs.h>
     51 #include <x86/cpu_counter.h>
     52 #include <machine/cpuvar.h>
     53 
     54 #include <dev/nvmm/nvmm.h>
     55 #include <dev/nvmm/nvmm_internal.h>
     56 #include <dev/nvmm/x86/nvmm_x86.h>
     57 
     58 int _vmx_vmxon(paddr_t *pa);
     59 int _vmx_vmxoff(void);
     60 int vmx_vmlaunch(uint64_t *gprs);
     61 int vmx_vmresume(uint64_t *gprs);
     62 
     63 #define vmx_vmxon(a) \
     64 	do { if (__predict_false(_vmx_vmxon(a) != 0)) { \
     65 		panic("%s: VMXON failed", __func__); \
     66 	} } while (0)
     67 #define vmx_vmxoff() \
     68 	do { if (__predict_false(_vmx_vmxoff() != 0)) { \
     69 		panic("%s: VMXOFF failed", __func__); \
     70 	} } while (0)
     71 
     72 struct ept_desc {
     73 	uint64_t eptp;
     74 	uint64_t mbz;
     75 } __packed;
     76 
     77 struct vpid_desc {
     78 	uint64_t vpid;
     79 	uint64_t addr;
     80 } __packed;
     81 
     82 static inline void
     83 vmx_invept(uint64_t op, struct ept_desc *desc)
     84 {
     85 	asm volatile (
     86 		"invept		%[desc],%[op];"
     87 		"jz		vmx_insn_failvalid;"
     88 		"jc		vmx_insn_failinvalid;"
     89 		:
     90 		: [desc] "m" (*desc), [op] "r" (op)
     91 		: "memory", "cc"
     92 	);
     93 }
     94 
     95 static inline void
     96 vmx_invvpid(uint64_t op, struct vpid_desc *desc)
     97 {
     98 	asm volatile (
     99 		"invvpid	%[desc],%[op];"
    100 		"jz		vmx_insn_failvalid;"
    101 		"jc		vmx_insn_failinvalid;"
    102 		:
    103 		: [desc] "m" (*desc), [op] "r" (op)
    104 		: "memory", "cc"
    105 	);
    106 }
    107 
    108 static inline uint64_t
    109 vmx_vmread(uint64_t field)
    110 {
    111 	uint64_t value;
    112 
    113 	asm volatile (
    114 		"vmread		%[field],%[value];"
    115 		"jz		vmx_insn_failvalid;"
    116 		"jc		vmx_insn_failinvalid;"
    117 		: [value] "=r" (value)
    118 		: [field] "r" (field)
    119 		: "cc"
    120 	);
    121 
    122 	return value;
    123 }
    124 
    125 static inline void
    126 vmx_vmwrite(uint64_t field, uint64_t value)
    127 {
    128 	asm volatile (
    129 		"vmwrite	%[value],%[field];"
    130 		"jz		vmx_insn_failvalid;"
    131 		"jc		vmx_insn_failinvalid;"
    132 		:
    133 		: [field] "r" (field), [value] "r" (value)
    134 		: "cc"
    135 	);
    136 }
    137 
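/*
 * Illustrative note (not part of the driver): the accessors above assume
 * the CPU is in VMX root operation and that the target VMCS was made
 * current with VMPTRLD.  On failure, the jz/jc branches land on the
 * global labels vmx_insn_failvalid/vmx_insn_failinvalid, defined on the
 * assembly side of the driver, which panic.  A typical read-modify-write
 * of a control field therefore looks like:
 *
 *	uint64_t ctls = vmx_vmread(VMCS_PROCBASED_CTLS);
 *	ctls |= PROC_CTLS_HLT_EXITING;
 *	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls);
 */
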
    138 #ifdef DIAGNOSTIC
    139 static inline paddr_t
    140 vmx_vmptrst(void)
    141 {
    142 	paddr_t pa;
    143 
    144 	asm volatile (
    145 		"vmptrst	%[pa];"
    146 		: [pa] "=m" (pa)
    147 		:
    148 		: "memory"
    149 	);
    150 
    151 	return pa;
    152 }
    153 #endif
    154 
    155 static inline void
    156 vmx_vmptrld(paddr_t *pa)
    157 {
    158 	asm volatile (
    159 		"vmptrld	%[pa];"
    160 		"jz		vmx_insn_failvalid;"
    161 		"jc		vmx_insn_failinvalid;"
    162 		:
    163 		: [pa] "m" (*pa)
    164 		: "memory", "cc"
    165 	);
    166 }
    167 
    168 static inline void
    169 vmx_vmclear(paddr_t *pa)
    170 {
    171 	asm volatile (
    172 		"vmclear	%[pa];"
    173 		"jz		vmx_insn_failvalid;"
    174 		"jc		vmx_insn_failinvalid;"
    175 		:
    176 		: [pa] "m" (*pa)
    177 		: "memory", "cc"
    178 	);
    179 }
    180 
    181 #define MSR_IA32_FEATURE_CONTROL	0x003A
    182 #define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
    183 #define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
    184 #define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)
    185 
    186 #define MSR_IA32_VMX_BASIC		0x0480
    187 #define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
    188 #define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
    189 #define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
    190 #define		IA32_VMX_BASIC_DUAL		__BIT(49)
    191 #define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
    192 #define			MEM_TYPE_UC		0
    193 #define			MEM_TYPE_WB		6
    194 #define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
    195 #define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)
    196 
    197 #define MSR_IA32_VMX_PINBASED_CTLS		0x0481
    198 #define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
    199 #define MSR_IA32_VMX_EXIT_CTLS			0x0483
    200 #define MSR_IA32_VMX_ENTRY_CTLS			0x0484
    201 #define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B
    202 
    203 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
    204 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
    205 #define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
    206 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490
    207 
    208 #define MSR_IA32_VMX_CR0_FIXED0			0x0486
    209 #define MSR_IA32_VMX_CR0_FIXED1			0x0487
    210 #define MSR_IA32_VMX_CR4_FIXED0			0x0488
    211 #define MSR_IA32_VMX_CR4_FIXED1			0x0489
    212 
    213 #define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
    214 #define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
    215 #define		IA32_VMX_EPT_VPID_UC			__BIT(8)
    216 #define		IA32_VMX_EPT_VPID_WB			__BIT(14)
    217 #define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
    218 #define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
    219 #define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
    220 #define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
    221 #define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
    222 #define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
    223 #define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
    224 #define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
    225 #define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)
    226 
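/*
 * Illustrative sketch: each CTLS MSR reports, in its low 32 bits, the
 * controls that must be 1 (the "allowed 0-settings") and, in its high
 * 32 bits, the controls that may be 1 (the "allowed 1-settings").  When
 * IA32_VMX_BASIC_TRUE_CTLS is set, the TRUE_* variants are authoritative:
 *
 *	uint64_t basic = rdmsr(MSR_IA32_VMX_BASIC);
 *	uint64_t ctls = (basic & IA32_VMX_BASIC_TRUE_CTLS) ?
 *	    rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) :
 *	    rdmsr(MSR_IA32_VMX_PROCBASED_CTLS);
 *	uint32_t must_be_one = (uint32_t)ctls;
 *	uint32_t may_be_one = (uint32_t)(ctls >> 32);
 */
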
    227 /* -------------------------------------------------------------------------- */
    228 
    229 /* 16-bit control fields */
    230 #define VMCS_VPID				0x00000000
    231 #define VMCS_PIR_VECTOR				0x00000002
    232 #define VMCS_EPTP_INDEX				0x00000004
    233 /* 16-bit guest-state fields */
    234 #define VMCS_GUEST_ES_SELECTOR			0x00000800
    235 #define VMCS_GUEST_CS_SELECTOR			0x00000802
    236 #define VMCS_GUEST_SS_SELECTOR			0x00000804
    237 #define VMCS_GUEST_DS_SELECTOR			0x00000806
    238 #define VMCS_GUEST_FS_SELECTOR			0x00000808
    239 #define VMCS_GUEST_GS_SELECTOR			0x0000080A
    240 #define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
    241 #define VMCS_GUEST_TR_SELECTOR			0x0000080E
    242 #define VMCS_GUEST_INTR_STATUS			0x00000810
    243 #define VMCS_PML_INDEX				0x00000812
    244 /* 16-bit host-state fields */
    245 #define VMCS_HOST_ES_SELECTOR			0x00000C00
    246 #define VMCS_HOST_CS_SELECTOR			0x00000C02
    247 #define VMCS_HOST_SS_SELECTOR			0x00000C04
    248 #define VMCS_HOST_DS_SELECTOR			0x00000C06
    249 #define VMCS_HOST_FS_SELECTOR			0x00000C08
    250 #define VMCS_HOST_GS_SELECTOR			0x00000C0A
    251 #define VMCS_HOST_TR_SELECTOR			0x00000C0C
    252 /* 64-bit control fields */
    253 #define VMCS_IO_BITMAP_A			0x00002000
    254 #define VMCS_IO_BITMAP_B			0x00002002
    255 #define VMCS_MSR_BITMAP				0x00002004
    256 #define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
    257 #define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
    258 #define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
    259 #define VMCS_EXECUTIVE_VMCS			0x0000200C
    260 #define VMCS_PML_ADDRESS			0x0000200E
    261 #define VMCS_TSC_OFFSET				0x00002010
    262 #define VMCS_VIRTUAL_APIC			0x00002012
    263 #define VMCS_APIC_ACCESS			0x00002014
    264 #define VMCS_PIR_DESC				0x00002016
    265 #define VMCS_VM_CONTROL				0x00002018
    266 #define VMCS_EPTP				0x0000201A
    267 #define		EPTP_TYPE			__BITS(2,0)
    268 #define			EPTP_TYPE_UC		0
    269 #define			EPTP_TYPE_WB		6
    270 #define		EPTP_WALKLEN			__BITS(5,3)
    271 #define		EPTP_FLAGS_AD			__BIT(6)
    272 #define		EPTP_PHYSADDR			__BITS(63,12)
    273 #define VMCS_EOI_EXIT0				0x0000201C
    274 #define VMCS_EOI_EXIT1				0x0000201E
    275 #define VMCS_EOI_EXIT2				0x00002020
    276 #define VMCS_EOI_EXIT3				0x00002022
    277 #define VMCS_EPTP_LIST				0x00002024
    278 #define VMCS_VMREAD_BITMAP			0x00002026
    279 #define VMCS_VMWRITE_BITMAP			0x00002028
    280 #define VMCS_VIRTUAL_EXCEPTION			0x0000202A
    281 #define VMCS_XSS_EXIT_BITMAP			0x0000202C
    282 #define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
    283 #define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
    284 #define VMCS_TSC_MULTIPLIER			0x00002032
    285 /* 64-bit read-only fields */
    286 #define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
    287 /* 64-bit guest-state fields */
    288 #define VMCS_LINK_POINTER			0x00002800
    289 #define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
    290 #define VMCS_GUEST_IA32_PAT			0x00002804
    291 #define VMCS_GUEST_IA32_EFER			0x00002806
    292 #define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
    293 #define VMCS_GUEST_PDPTE0			0x0000280A
    294 #define VMCS_GUEST_PDPTE1			0x0000280C
    295 #define VMCS_GUEST_PDPTE2			0x0000280E
    296 #define VMCS_GUEST_PDPTE3			0x00002810
    297 #define VMCS_GUEST_BNDCFGS			0x00002812
    298 /* 64-bit host-state fields */
    299 #define VMCS_HOST_IA32_PAT			0x00002C00
    300 #define VMCS_HOST_IA32_EFER			0x00002C02
    301 #define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
    302 /* 32-bit control fields */
    303 #define VMCS_PINBASED_CTLS			0x00004000
    304 #define		PIN_CTLS_INT_EXITING		__BIT(0)
    305 #define		PIN_CTLS_NMI_EXITING		__BIT(3)
    306 #define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
    307 #define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
    308 #define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
    309 #define VMCS_PROCBASED_CTLS			0x00004002
    310 #define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
    311 #define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
    312 #define		PROC_CTLS_HLT_EXITING		__BIT(7)
    313 #define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
    314 #define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
    315 #define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
    316 #define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
    317 #define		PROC_CTLS_RCR3_EXITING		__BIT(15)
    318 #define		PROC_CTLS_LCR3_EXITING		__BIT(16)
    319 #define		PROC_CTLS_RCR8_EXITING		__BIT(19)
    320 #define		PROC_CTLS_LCR8_EXITING		__BIT(20)
    321 #define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
    322 #define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
    323 #define		PROC_CTLS_DR_EXITING		__BIT(23)
    324 #define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
    325 #define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
    326 #define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
    327 #define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
    328 #define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
    329 #define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
    330 #define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
    331 #define VMCS_EXCEPTION_BITMAP			0x00004004
    332 #define VMCS_PF_ERROR_MASK			0x00004006
    333 #define VMCS_PF_ERROR_MATCH			0x00004008
    334 #define VMCS_CR3_TARGET_COUNT			0x0000400A
    335 #define VMCS_EXIT_CTLS				0x0000400C
    336 #define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
    337 #define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
    338 #define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
    339 #define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
    340 #define		EXIT_CTLS_SAVE_PAT		__BIT(18)
    341 #define		EXIT_CTLS_LOAD_PAT		__BIT(19)
    342 #define		EXIT_CTLS_SAVE_EFER		__BIT(20)
    343 #define		EXIT_CTLS_LOAD_EFER		__BIT(21)
    344 #define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
    345 #define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
    346 #define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
    347 #define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
    348 #define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
    349 #define VMCS_ENTRY_CTLS				0x00004012
    350 #define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
    351 #define		ENTRY_CTLS_LONG_MODE		__BIT(9)
    352 #define		ENTRY_CTLS_SMM			__BIT(10)
    353 #define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
    354 #define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
    355 #define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
    356 #define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
    357 #define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
    358 #define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
    359 #define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
    360 #define VMCS_ENTRY_INTR_INFO			0x00004016
    361 #define		INTR_INFO_VECTOR		__BITS(7,0)
    362 #define		INTR_INFO_TYPE			__BITS(10,8)
    363 #define			INTR_TYPE_EXT_INT	0
    364 #define			INTR_TYPE_NMI		2
    365 #define			INTR_TYPE_HW_EXC	3
    366 #define			INTR_TYPE_SW_INT	4
    367 #define			INTR_TYPE_PRIV_SW_EXC	5
    368 #define			INTR_TYPE_SW_EXC	6
    369 #define			INTR_TYPE_OTHER		7
    370 #define		INTR_INFO_ERROR			__BIT(11)
    371 #define		INTR_INFO_VALID			__BIT(31)
    372 #define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
    373 #define VMCS_ENTRY_INSTRUCTION_LENGTH		0x0000401A
    374 #define VMCS_TPR_THRESHOLD			0x0000401C
    375 #define VMCS_PROCBASED_CTLS2			0x0000401E
    376 #define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
    377 #define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
    378 #define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
    379 #define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
    380 #define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
    381 #define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
    382 #define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
    383 #define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
    384 #define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
    385 #define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
    386 #define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
    387 #define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
    388 #define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
    389 #define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
    390 #define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
    391 #define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
    392 #define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
    393 #define		PROC_CTLS2_PML_ENABLE		__BIT(17)
    394 #define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
    395 #define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
    396 #define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
    397 #define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
    398 #define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
    399 #define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
    400 #define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
    401 #define VMCS_PLE_GAP				0x00004020
    402 #define VMCS_PLE_WINDOW				0x00004022
    403 /* 32-bit read-only data fields */
    404 #define VMCS_INSTRUCTION_ERROR			0x00004400
    405 #define VMCS_EXIT_REASON			0x00004402
    406 #define VMCS_EXIT_INTR_INFO			0x00004404
    407 #define VMCS_EXIT_INTR_ERRCODE			0x00004406
    408 #define VMCS_IDT_VECTORING_INFO			0x00004408
    409 #define VMCS_IDT_VECTORING_ERROR		0x0000440A
    410 #define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
    411 #define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
    412 /* 32-bit guest-state fields */
    413 #define VMCS_GUEST_ES_LIMIT			0x00004800
    414 #define VMCS_GUEST_CS_LIMIT			0x00004802
    415 #define VMCS_GUEST_SS_LIMIT			0x00004804
    416 #define VMCS_GUEST_DS_LIMIT			0x00004806
    417 #define VMCS_GUEST_FS_LIMIT			0x00004808
    418 #define VMCS_GUEST_GS_LIMIT			0x0000480A
    419 #define VMCS_GUEST_LDTR_LIMIT			0x0000480C
    420 #define VMCS_GUEST_TR_LIMIT			0x0000480E
    421 #define VMCS_GUEST_GDTR_LIMIT			0x00004810
    422 #define VMCS_GUEST_IDTR_LIMIT			0x00004812
    423 #define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
    424 #define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
    425 #define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
    426 #define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
    427 #define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
    428 #define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
    429 #define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
    430 #define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
    431 #define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
    432 #define		INT_STATE_STI			__BIT(0)
    433 #define		INT_STATE_MOVSS			__BIT(1)
    434 #define		INT_STATE_SMI			__BIT(2)
    435 #define		INT_STATE_NMI			__BIT(3)
    436 #define		INT_STATE_ENCLAVE		__BIT(4)
    437 #define VMCS_GUEST_ACTIVITY			0x00004826
    438 #define VMCS_GUEST_SMBASE			0x00004828
    439 #define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
    440 #define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
    441 /* 32-bit host state fields */
    442 #define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
    443 /* Natural-Width control fields */
    444 #define VMCS_CR0_MASK				0x00006000
    445 #define VMCS_CR4_MASK				0x00006002
    446 #define VMCS_CR0_SHADOW				0x00006004
    447 #define VMCS_CR4_SHADOW				0x00006006
    448 #define VMCS_CR3_TARGET0			0x00006008
    449 #define VMCS_CR3_TARGET1			0x0000600A
    450 #define VMCS_CR3_TARGET2			0x0000600C
    451 #define VMCS_CR3_TARGET3			0x0000600E
    452 /* Natural-Width read-only fields */
    453 #define VMCS_EXIT_QUALIFICATION			0x00006400
    454 #define VMCS_IO_RCX				0x00006402
    455 #define VMCS_IO_RSI				0x00006404
    456 #define VMCS_IO_RDI				0x00006406
    457 #define VMCS_IO_RIP				0x00006408
    458 #define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
    459 /* Natural-Width guest-state fields */
    460 #define VMCS_GUEST_CR0				0x00006800
    461 #define VMCS_GUEST_CR3				0x00006802
    462 #define VMCS_GUEST_CR4				0x00006804
    463 #define VMCS_GUEST_ES_BASE			0x00006806
    464 #define VMCS_GUEST_CS_BASE			0x00006808
    465 #define VMCS_GUEST_SS_BASE			0x0000680A
    466 #define VMCS_GUEST_DS_BASE			0x0000680C
    467 #define VMCS_GUEST_FS_BASE			0x0000680E
    468 #define VMCS_GUEST_GS_BASE			0x00006810
    469 #define VMCS_GUEST_LDTR_BASE			0x00006812
    470 #define VMCS_GUEST_TR_BASE			0x00006814
    471 #define VMCS_GUEST_GDTR_BASE			0x00006816
    472 #define VMCS_GUEST_IDTR_BASE			0x00006818
    473 #define VMCS_GUEST_DR7				0x0000681A
    474 #define VMCS_GUEST_RSP				0x0000681C
    475 #define VMCS_GUEST_RIP				0x0000681E
    476 #define VMCS_GUEST_RFLAGS			0x00006820
    477 #define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
    478 #define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
    479 #define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
    480 /* Natural-Width host-state fields */
    481 #define VMCS_HOST_CR0				0x00006C00
    482 #define VMCS_HOST_CR3				0x00006C02
    483 #define VMCS_HOST_CR4				0x00006C04
    484 #define VMCS_HOST_FS_BASE			0x00006C06
    485 #define VMCS_HOST_GS_BASE			0x00006C08
    486 #define VMCS_HOST_TR_BASE			0x00006C0A
    487 #define VMCS_HOST_GDTR_BASE			0x00006C0C
    488 #define VMCS_HOST_IDTR_BASE			0x00006C0E
    489 #define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
    490 #define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
    491 #define VMCS_HOST_RSP				0x00006C14
     492 #define VMCS_HOST_RIP				0x00006C16
    493 
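/*
 * Illustrative sketch: VMCS_EPTP takes a composed pointer built from the
 * EPTP_* fields above.  Assuming a 4-level EPT with WB cacheability and
 * A/D bits, with pml4_pa the physical address of the root EPT page:
 *
 *	uint64_t eptp =
 *	    __SHIFTIN(EPTP_TYPE_WB, EPTP_TYPE) |
 *	    __SHIFTIN(4 - 1, EPTP_WALKLEN) |
 *	    EPTP_FLAGS_AD |
 *	    (pml4_pa & EPTP_PHYSADDR);
 *	vmx_vmwrite(VMCS_EPTP, eptp);
 */
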
    494 /* VMX basic exit reasons. */
    495 #define VMCS_EXITCODE_EXC_NMI			0
    496 #define VMCS_EXITCODE_EXT_INT			1
    497 #define VMCS_EXITCODE_SHUTDOWN			2
    498 #define VMCS_EXITCODE_INIT			3
    499 #define VMCS_EXITCODE_SIPI			4
    500 #define VMCS_EXITCODE_SMI			5
    501 #define VMCS_EXITCODE_OTHER_SMI			6
    502 #define VMCS_EXITCODE_INT_WINDOW		7
    503 #define VMCS_EXITCODE_NMI_WINDOW		8
    504 #define VMCS_EXITCODE_TASK_SWITCH		9
    505 #define VMCS_EXITCODE_CPUID			10
    506 #define VMCS_EXITCODE_GETSEC			11
    507 #define VMCS_EXITCODE_HLT			12
    508 #define VMCS_EXITCODE_INVD			13
    509 #define VMCS_EXITCODE_INVLPG			14
    510 #define VMCS_EXITCODE_RDPMC			15
    511 #define VMCS_EXITCODE_RDTSC			16
    512 #define VMCS_EXITCODE_RSM			17
    513 #define VMCS_EXITCODE_VMCALL			18
    514 #define VMCS_EXITCODE_VMCLEAR			19
    515 #define VMCS_EXITCODE_VMLAUNCH			20
    516 #define VMCS_EXITCODE_VMPTRLD			21
    517 #define VMCS_EXITCODE_VMPTRST			22
    518 #define VMCS_EXITCODE_VMREAD			23
    519 #define VMCS_EXITCODE_VMRESUME			24
    520 #define VMCS_EXITCODE_VMWRITE			25
    521 #define VMCS_EXITCODE_VMXOFF			26
    522 #define VMCS_EXITCODE_VMXON			27
    523 #define VMCS_EXITCODE_CR			28
    524 #define VMCS_EXITCODE_DR			29
    525 #define VMCS_EXITCODE_IO			30
    526 #define VMCS_EXITCODE_RDMSR			31
    527 #define VMCS_EXITCODE_WRMSR			32
    528 #define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
    529 #define VMCS_EXITCODE_FAIL_MSR_INVALID		34
    530 #define VMCS_EXITCODE_MWAIT			36
    531 #define VMCS_EXITCODE_TRAP_FLAG			37
    532 #define VMCS_EXITCODE_MONITOR			39
    533 #define VMCS_EXITCODE_PAUSE			40
    534 #define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
    535 #define VMCS_EXITCODE_TPR_BELOW			43
    536 #define VMCS_EXITCODE_APIC_ACCESS		44
    537 #define VMCS_EXITCODE_VEOI			45
    538 #define VMCS_EXITCODE_GDTR_IDTR			46
    539 #define VMCS_EXITCODE_LDTR_TR			47
    540 #define VMCS_EXITCODE_EPT_VIOLATION		48
    541 #define VMCS_EXITCODE_EPT_MISCONFIG		49
    542 #define VMCS_EXITCODE_INVEPT			50
    543 #define VMCS_EXITCODE_RDTSCP			51
    544 #define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
    545 #define VMCS_EXITCODE_INVVPID			53
    546 #define VMCS_EXITCODE_WBINVD			54
    547 #define VMCS_EXITCODE_XSETBV			55
    548 #define VMCS_EXITCODE_APIC_WRITE		56
    549 #define VMCS_EXITCODE_RDRAND			57
    550 #define VMCS_EXITCODE_INVPCID			58
    551 #define VMCS_EXITCODE_VMFUNC			59
    552 #define VMCS_EXITCODE_ENCLS			60
    553 #define VMCS_EXITCODE_RDSEED			61
    554 #define VMCS_EXITCODE_PAGE_LOG_FULL		62
    555 #define VMCS_EXITCODE_XSAVES			63
    556 #define VMCS_EXITCODE_XRSTORS			64
    557 
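/*
 * Illustrative sketch: the basic exit reason occupies the low 16 bits of
 * VMCS_EXIT_REASON; the high bits carry flags (such as "VM-entry failure"
 * in bit 31).  A dispatcher therefore typically does:
 *
 *	uint64_t exitcode = vmx_vmread(VMCS_EXIT_REASON);
 *	exitcode &= __BITS(15,0);
 *	switch (exitcode) {
 *	case VMCS_EXITCODE_CPUID:
 *		...
 *	}
 */
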
    558 /* -------------------------------------------------------------------------- */
    559 
    560 static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
    561 static void vmx_vcpu_state_commit(struct nvmm_cpu *);
    562 
    563 #define VMX_MSRLIST_STAR		0
    564 #define VMX_MSRLIST_LSTAR		1
    565 #define VMX_MSRLIST_CSTAR		2
    566 #define VMX_MSRLIST_SFMASK		3
    567 #define VMX_MSRLIST_KERNELGSBASE	4
    568 #define VMX_MSRLIST_EXIT_NMSR		5
    569 #define VMX_MSRLIST_L1DFLUSH		5
    570 
    571 /* On VM-entry, the list may grow by one slot, to include L1DFLUSH. */
    572 static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;
    573 
    574 struct vmxon {
    575 	uint32_t ident;
    576 #define VMXON_IDENT_REVISION	__BITS(30,0)
    577 
    578 	uint8_t data[PAGE_SIZE - 4];
    579 } __packed;
    580 
    581 CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);
    582 
    583 struct vmxoncpu {
    584 	vaddr_t va;
    585 	paddr_t pa;
    586 };
    587 
    588 static struct vmxoncpu vmxoncpu[MAXCPUS];
    589 
    590 struct vmcs {
    591 	uint32_t ident;
    592 #define VMCS_IDENT_REVISION	__BITS(30,0)
    593 #define VMCS_IDENT_SHADOW	__BIT(31)
    594 
    595 	uint32_t abort;
    596 	uint8_t data[PAGE_SIZE - 8];
    597 } __packed;
    598 
    599 CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);
    600 
    601 struct msr_entry {
    602 	uint32_t msr;
    603 	uint32_t rsvd;
    604 	uint64_t val;
    605 } __packed;
    606 
    607 #define VPID_MAX	0xFFFF
    608 
    609 /* Make sure we never run out of VPIDs. */
    610 CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);
    611 
    612 static uint64_t vmx_tlb_flush_op __read_mostly;
    613 static uint64_t vmx_ept_flush_op __read_mostly;
    614 static uint64_t vmx_eptp_type __read_mostly;
    615 
    616 static uint64_t vmx_pinbased_ctls __read_mostly;
    617 static uint64_t vmx_procbased_ctls __read_mostly;
    618 static uint64_t vmx_procbased_ctls2 __read_mostly;
    619 static uint64_t vmx_entry_ctls __read_mostly;
    620 static uint64_t vmx_exit_ctls __read_mostly;
    621 
    622 static uint64_t vmx_cr0_fixed0 __read_mostly;
    623 static uint64_t vmx_cr0_fixed1 __read_mostly;
    624 static uint64_t vmx_cr4_fixed0 __read_mostly;
    625 static uint64_t vmx_cr4_fixed1 __read_mostly;
    626 
    627 extern bool pmap_ept_has_ad;
    628 
    629 #define VMX_PINBASED_CTLS_ONE	\
    630 	(PIN_CTLS_INT_EXITING| \
    631 	 PIN_CTLS_NMI_EXITING| \
    632 	 PIN_CTLS_VIRTUAL_NMIS)
    633 
    634 #define VMX_PINBASED_CTLS_ZERO	0
    635 
    636 #define VMX_PROCBASED_CTLS_ONE	\
    637 	(PROC_CTLS_USE_TSC_OFFSETTING| \
    638 	 PROC_CTLS_HLT_EXITING| \
    639 	 PROC_CTLS_MWAIT_EXITING | \
    640 	 PROC_CTLS_RDPMC_EXITING | \
    641 	 PROC_CTLS_RCR8_EXITING | \
    642 	 PROC_CTLS_LCR8_EXITING | \
    643 	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
    644 	 PROC_CTLS_USE_MSR_BITMAPS | \
    645 	 PROC_CTLS_MONITOR_EXITING | \
    646 	 PROC_CTLS_ACTIVATE_CTLS2)
    647 
    648 #define VMX_PROCBASED_CTLS_ZERO	\
    649 	(PROC_CTLS_RCR3_EXITING| \
    650 	 PROC_CTLS_LCR3_EXITING)
    651 
    652 #define VMX_PROCBASED_CTLS2_ONE	\
    653 	(PROC_CTLS2_ENABLE_EPT| \
    654 	 PROC_CTLS2_ENABLE_VPID| \
    655 	 PROC_CTLS2_UNRESTRICTED_GUEST)
    656 
    657 #define VMX_PROCBASED_CTLS2_ZERO	0
    658 
    659 #define VMX_ENTRY_CTLS_ONE	\
    660 	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
    661 	 ENTRY_CTLS_LOAD_EFER| \
    662 	 ENTRY_CTLS_LOAD_PAT)
    663 
    664 #define VMX_ENTRY_CTLS_ZERO	\
    665 	(ENTRY_CTLS_SMM| \
    666 	 ENTRY_CTLS_DISABLE_DUAL)
    667 
    668 #define VMX_EXIT_CTLS_ONE	\
    669 	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
    670 	 EXIT_CTLS_HOST_LONG_MODE| \
    671 	 EXIT_CTLS_SAVE_PAT| \
    672 	 EXIT_CTLS_LOAD_PAT| \
    673 	 EXIT_CTLS_SAVE_EFER| \
    674 	 EXIT_CTLS_LOAD_EFER)
    675 
    676 #define VMX_EXIT_CTLS_ZERO	0
    677 
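/*
 * Illustrative sketch: the *_ONE masks list the controls this driver
 * requires to be set, the *_ZERO masks those it requires to be clear.
 * During attach they are checked against the allowed 0-/1-settings
 * reported by the corresponding MSR (the TRUE_* variant when available),
 * roughly:
 *
 *	uint64_t msr = rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
 *	uint32_t must_be_one = (uint32_t)msr;
 *	uint32_t may_be_one = (uint32_t)(msr >> 32);
 *	if ((VMX_PROCBASED_CTLS_ONE & ~may_be_one) != 0 ||
 *	    (VMX_PROCBASED_CTLS_ZERO & must_be_one) != 0)
 *		-> the CPU cannot run this driver
 */
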
    678 static uint8_t *vmx_asidmap __read_mostly;
    679 static uint32_t vmx_maxasid __read_mostly;
    680 static kmutex_t vmx_asidlock __cacheline_aligned;
    681 
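/*
 * Illustrative sketch: VPIDs ("ASIDs" here) are handed out from this
 * bitmap under vmx_asidlock, with slot 0 kept reserved, since VPID zero
 * designates the host.  Allocation is essentially a first-fit scan:
 *
 *	mutex_enter(&vmx_asidlock);
 *	for (i = 1; i < vmx_maxasid; i++) {
 *		if (vmx_asidmap[i / 8] & __BIT(i % 8))
 *			continue;
 *		vmx_asidmap[i / 8] |= __BIT(i % 8);
 *		break;
 *	}
 *	mutex_exit(&vmx_asidlock);
 */
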
    682 #define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
    683 static uint64_t vmx_xcr0_mask __read_mostly;
    684 
    685 #define VMX_NCPUIDS	32
    686 
    687 #define VMCS_NPAGES	1
    688 #define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)
    689 
    690 #define MSRBM_NPAGES	1
    691 #define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)
    692 
    693 #define EFER_TLB_FLUSH \
    694 	(EFER_NXE|EFER_LMA|EFER_LME)
    695 #define CR0_TLB_FLUSH \
    696 	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
    697 #define CR4_TLB_FLUSH \
    698 	(CR4_PGE|CR4_PAE|CR4_PSE)
    699 
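/*
 * The *_TLB_FLUSH masks list the EFER/CR0/CR4 bits that affect address
 * translation.  When the virtualizer changes one of them in the guest
 * state, the guest TLB must be flushed; a state-set path can detect this
 * with, for example:
 *
 *	if ((oldcr4 ^ newcr4) & CR4_TLB_FLUSH)
 *		cpudata->gtlb_want_flush = true;
 */
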
    700 /* -------------------------------------------------------------------------- */
    701 
    702 struct vmx_machdata {
    703 	volatile uint64_t mach_htlb_gen;
    704 };
    705 
    706 static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
    707 	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
    708 	    sizeof(struct nvmm_vcpu_conf_cpuid),
    709 	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
    710 	    sizeof(struct nvmm_vcpu_conf_tpr)
    711 };
    712 
    713 struct vmx_cpudata {
    714 	/* General */
    715 	uint64_t asid;
    716 	bool gtlb_want_flush;
    717 	bool gtsc_want_update;
    718 	uint64_t vcpu_htlb_gen;
    719 	kcpuset_t *htlb_want_flush;
    720 
    721 	/* VMCS */
    722 	struct vmcs *vmcs;
    723 	paddr_t vmcs_pa;
    724 	size_t vmcs_refcnt;
    725 	struct cpu_info *vmcs_ci;
    726 	bool vmcs_launched;
    727 
    728 	/* MSR bitmap */
    729 	uint8_t *msrbm;
    730 	paddr_t msrbm_pa;
    731 
    732 	/* Host state */
    733 	uint64_t hxcr0;
    734 	uint64_t star;
    735 	uint64_t lstar;
    736 	uint64_t cstar;
    737 	uint64_t sfmask;
    738 	uint64_t kernelgsbase;
    739 
    740 	/* Intr state */
    741 	bool int_window_exit;
    742 	bool nmi_window_exit;
    743 	bool evt_pending;
    744 
    745 	/* Guest state */
    746 	struct msr_entry *gmsr;
    747 	paddr_t gmsr_pa;
    748 	uint64_t gmsr_misc_enable;
    749 	uint64_t gcr2;
    750 	uint64_t gcr8;
    751 	uint64_t gxcr0;
    752 	uint64_t gprs[NVMM_X64_NGPR];
    753 	uint64_t drs[NVMM_X64_NDR];
    754 	uint64_t gtsc;
    755 	struct xsave_header gfpu __aligned(64);
    756 
    757 	/* VCPU configuration. */
    758 	bool cpuidpresent[VMX_NCPUIDS];
    759 	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
    760 	struct nvmm_vcpu_conf_tpr tpr;
    761 };
    762 
    763 static const struct {
    764 	uint64_t selector;
    765 	uint64_t attrib;
    766 	uint64_t limit;
    767 	uint64_t base;
    768 } vmx_guest_segs[NVMM_X64_NSEG] = {
    769 	[NVMM_X64_SEG_ES] = {
    770 		VMCS_GUEST_ES_SELECTOR,
    771 		VMCS_GUEST_ES_ACCESS_RIGHTS,
    772 		VMCS_GUEST_ES_LIMIT,
    773 		VMCS_GUEST_ES_BASE
    774 	},
    775 	[NVMM_X64_SEG_CS] = {
    776 		VMCS_GUEST_CS_SELECTOR,
    777 		VMCS_GUEST_CS_ACCESS_RIGHTS,
    778 		VMCS_GUEST_CS_LIMIT,
    779 		VMCS_GUEST_CS_BASE
    780 	},
    781 	[NVMM_X64_SEG_SS] = {
    782 		VMCS_GUEST_SS_SELECTOR,
    783 		VMCS_GUEST_SS_ACCESS_RIGHTS,
    784 		VMCS_GUEST_SS_LIMIT,
    785 		VMCS_GUEST_SS_BASE
    786 	},
    787 	[NVMM_X64_SEG_DS] = {
    788 		VMCS_GUEST_DS_SELECTOR,
    789 		VMCS_GUEST_DS_ACCESS_RIGHTS,
    790 		VMCS_GUEST_DS_LIMIT,
    791 		VMCS_GUEST_DS_BASE
    792 	},
    793 	[NVMM_X64_SEG_FS] = {
    794 		VMCS_GUEST_FS_SELECTOR,
    795 		VMCS_GUEST_FS_ACCESS_RIGHTS,
    796 		VMCS_GUEST_FS_LIMIT,
    797 		VMCS_GUEST_FS_BASE
    798 	},
    799 	[NVMM_X64_SEG_GS] = {
    800 		VMCS_GUEST_GS_SELECTOR,
    801 		VMCS_GUEST_GS_ACCESS_RIGHTS,
    802 		VMCS_GUEST_GS_LIMIT,
    803 		VMCS_GUEST_GS_BASE
    804 	},
    805 	[NVMM_X64_SEG_GDT] = {
    806 		0, /* doesn't exist */
    807 		0, /* doesn't exist */
    808 		VMCS_GUEST_GDTR_LIMIT,
    809 		VMCS_GUEST_GDTR_BASE
    810 	},
    811 	[NVMM_X64_SEG_IDT] = {
    812 		0, /* doesn't exist */
    813 		0, /* doesn't exist */
    814 		VMCS_GUEST_IDTR_LIMIT,
    815 		VMCS_GUEST_IDTR_BASE
    816 	},
    817 	[NVMM_X64_SEG_LDT] = {
    818 		VMCS_GUEST_LDTR_SELECTOR,
    819 		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
    820 		VMCS_GUEST_LDTR_LIMIT,
    821 		VMCS_GUEST_LDTR_BASE
    822 	},
    823 	[NVMM_X64_SEG_TR] = {
    824 		VMCS_GUEST_TR_SELECTOR,
    825 		VMCS_GUEST_TR_ACCESS_RIGHTS,
    826 		VMCS_GUEST_TR_LIMIT,
    827 		VMCS_GUEST_TR_BASE
    828 	}
    829 };
    830 
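/*
 * Illustrative note: this table lets the state code read and write
 * segments generically, e.g. with a hypothetical 'seg' descriptor:
 *
 *	vmx_vmwrite(vmx_guest_segs[i].selector, seg->selector);
 *	vmx_vmwrite(vmx_guest_segs[i].limit, seg->limit);
 *	vmx_vmwrite(vmx_guest_segs[i].base, seg->base);
 *
 * GDTR and IDTR have no selector or access-rights fields in the VMCS,
 * hence the zero entries above.
 */
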
    831 /* -------------------------------------------------------------------------- */
    832 
    833 static uint64_t
    834 vmx_get_revision(void)
    835 {
    836 	uint64_t msr;
    837 
    838 	msr = rdmsr(MSR_IA32_VMX_BASIC);
    839 	msr &= IA32_VMX_BASIC_IDENT;
    840 
    841 	return msr;
    842 }
    843 
    844 static void
    845 vmx_vmclear_ipi(void *arg1, void *arg2)
    846 {
    847 	paddr_t vmcs_pa = (paddr_t)arg1;
    848 	vmx_vmclear(&vmcs_pa);
    849 }
    850 
    851 static void
    852 vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
    853 {
    854 	uint64_t xc;
    855 	int bound;
    856 
    857 	KASSERT(kpreempt_disabled());
    858 
    859 	bound = curlwp_bind();
    860 	kpreempt_enable();
    861 
    862 	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
    863 	xc_wait(xc);
    864 
    865 	kpreempt_disable();
    866 	curlwp_bindx(bound);
    867 }
    868 
    869 static void
    870 vmx_vmcs_enter(struct nvmm_cpu *vcpu)
    871 {
    872 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    873 	struct cpu_info *vmcs_ci;
    874 	paddr_t oldpa __diagused;
    875 
    876 	cpudata->vmcs_refcnt++;
    877 	if (cpudata->vmcs_refcnt > 1) {
    878 #ifdef DIAGNOSTIC
    879 		KASSERT(kpreempt_disabled());
    880 		oldpa = vmx_vmptrst();
    881 		KASSERT(oldpa == cpudata->vmcs_pa);
    882 #endif
    883 		return;
    884 	}
    885 
    886 	vmcs_ci = cpudata->vmcs_ci;
    887 	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */
    888 
    889 	kpreempt_disable();
    890 
    891 	if (vmcs_ci == NULL) {
    892 		/* This VMCS is loaded for the first time. */
    893 		vmx_vmclear(&cpudata->vmcs_pa);
    894 		cpudata->vmcs_launched = false;
    895 	} else if (vmcs_ci != curcpu()) {
    896 		/* This VMCS is active on a remote CPU. */
    897 		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
    898 		cpudata->vmcs_launched = false;
    899 	} else {
    900 		/* This VMCS is active on curcpu, nothing to do. */
    901 	}
    902 
    903 	vmx_vmptrld(&cpudata->vmcs_pa);
    904 }
    905 
    906 static void
    907 vmx_vmcs_leave(struct nvmm_cpu *vcpu)
    908 {
    909 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    910 
    911 	KASSERT(kpreempt_disabled());
    912 #ifdef DIAGNOSTIC
    913 	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
    914 #endif
    915 	KASSERT(cpudata->vmcs_refcnt > 0);
    916 	cpudata->vmcs_refcnt--;
    917 
    918 	if (cpudata->vmcs_refcnt > 0) {
    919 		return;
    920 	}
    921 
    922 	cpudata->vmcs_ci = curcpu();
    923 	kpreempt_enable();
    924 }
    925 
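/*
 * Usage sketch (illustrative): vmx_vmcs_enter() makes the VCPU's VMCS
 * current on this CPU with preemption disabled, and may be nested via
 * the refcount.  Every call must be paired with vmx_vmcs_leave(), or
 * with vmx_vmcs_destroy() for the final teardown reference:
 *
 *	vmx_vmcs_enter(vcpu);
 *	vmx_vmwrite(VMCS_GUEST_RIP, rip);
 *	vmx_vmcs_leave(vcpu);
 */
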
    926 static void
    927 vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
    928 {
    929 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    930 
    931 	KASSERT(kpreempt_disabled());
    932 #ifdef DIAGNOSTIC
    933 	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
    934 #endif
    935 	KASSERT(cpudata->vmcs_refcnt == 1);
    936 	cpudata->vmcs_refcnt--;
    937 
    938 	vmx_vmclear(&cpudata->vmcs_pa);
    939 	kpreempt_enable();
    940 }
    941 
    942 /* -------------------------------------------------------------------------- */
    943 
    944 static void
    945 vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
    946 {
    947 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    948 	uint64_t ctls1;
    949 
    950 	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);
    951 
    952 	if (nmi) {
    953 		/* XXX INT_STATE_NMI? */
    954 		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
    955 		cpudata->nmi_window_exit = true;
    956 	} else {
    957 		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
    958 		cpudata->int_window_exit = true;
    959 	}
    960 
    961 	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
    962 }
    963 
    964 static void
    965 vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
    966 {
    967 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    968 	uint64_t ctls1;
    969 
    970 	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);
    971 
    972 	if (nmi) {
    973 		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
    974 		cpudata->nmi_window_exit = false;
    975 	} else {
    976 		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
    977 		cpudata->int_window_exit = false;
    978 	}
    979 
    980 	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
    981 }
    982 
    983 static inline int
    984 vmx_event_has_error(uint8_t vector)
    985 {
    986 	switch (vector) {
    987 	case 8:		/* #DF */
    988 	case 10:	/* #TS */
    989 	case 11:	/* #NP */
    990 	case 12:	/* #SS */
    991 	case 13:	/* #GP */
    992 	case 14:	/* #PF */
    993 	case 17:	/* #AC */
    994 	case 30:	/* #SX */
    995 		return 1;
    996 	default:
    997 		return 0;
    998 	}
    999 }
   1000 
   1001 static int
   1002 vmx_vcpu_inject(struct nvmm_cpu *vcpu)
   1003 {
   1004 	struct nvmm_comm_page *comm = vcpu->comm;
   1005 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1006 	int type = 0, err = 0, ret = EINVAL;
   1007 	u_int evtype;
   1008 	uint8_t vector;
   1009 	uint64_t info, error;
   1010 
   1011 	evtype = comm->event.type;
   1012 	vector = comm->event.vector;
   1013 	error = comm->event.u.excp.error;
   1014 	__insn_barrier();
   1015 
   1016 	vmx_vmcs_enter(vcpu);
   1017 
   1018 	switch (evtype) {
   1019 	case NVMM_VCPU_EVENT_EXCP:
   1020 		if (vector == 2 || vector >= 32)
   1021 			goto out;
   1022 		if (vector == 3 || vector == 0)
   1023 			goto out;
   1024 		type = INTR_TYPE_HW_EXC;
   1025 		err = vmx_event_has_error(vector);
   1026 		break;
   1027 	case NVMM_VCPU_EVENT_INTR:
   1028 		type = INTR_TYPE_EXT_INT;
   1029 		if (vector == 2) {
   1030 			type = INTR_TYPE_NMI;
   1031 			vmx_event_waitexit_enable(vcpu, true);
   1032 		}
   1033 		err = 0;
   1034 		break;
   1035 	default:
   1036 		goto out;
   1037 	}
   1038 
   1039 	info =
   1040 	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
   1041 	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
   1042 	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
   1043 	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
   1044 	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
   1045 	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);
   1046 
   1047 	cpudata->evt_pending = true;
   1048 	ret = 0;
   1049 
   1050 out:
   1051 	vmx_vmcs_leave(vcpu);
   1052 	return ret;
   1053 }
   1054 
   1055 static void
   1056 vmx_inject_ud(struct nvmm_cpu *vcpu)
   1057 {
   1058 	struct nvmm_comm_page *comm = vcpu->comm;
   1059 	int ret __diagused;
   1060 
   1061 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
   1062 	comm->event.vector = 6;
   1063 	comm->event.u.excp.error = 0;
   1064 
   1065 	ret = vmx_vcpu_inject(vcpu);
   1066 	KASSERT(ret == 0);
   1067 }
   1068 
   1069 static void
   1070 vmx_inject_gp(struct nvmm_cpu *vcpu)
   1071 {
   1072 	struct nvmm_comm_page *comm = vcpu->comm;
   1073 	int ret __diagused;
   1074 
   1075 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
   1076 	comm->event.vector = 13;
   1077 	comm->event.u.excp.error = 0;
   1078 
   1079 	ret = vmx_vcpu_inject(vcpu);
   1080 	KASSERT(ret == 0);
   1081 }
   1082 
   1083 static inline int
   1084 vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
   1085 {
   1086 	if (__predict_true(!vcpu->comm->event_commit)) {
   1087 		return 0;
   1088 	}
   1089 	vcpu->comm->event_commit = false;
   1090 	return vmx_vcpu_inject(vcpu);
   1091 }
   1092 
   1093 static inline void
   1094 vmx_inkernel_advance(void)
   1095 {
   1096 	uint64_t rip, inslen, intstate;
   1097 
   1098 	/*
   1099 	 * Maybe we should also apply single-stepping and debug exceptions
   1100 	 * here.  This matters for guest ring-3 code, which can execute
   1101 	 * 'cpuid' under a debugger.
   1102 	 */
   1103 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1104 	rip = vmx_vmread(VMCS_GUEST_RIP);
   1105 	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
   1106 	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   1107 	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
   1108 	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
   1109 }
   1110 
   1111 static void
   1112 vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
   1113 {
   1114 	exit->u.inv.hwcode = code;
   1115 	exit->reason = NVMM_VCPU_EXIT_INVALID;
   1116 }
   1117 
   1118 static void
   1119 vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1120     struct nvmm_vcpu_exit *exit)
   1121 {
   1122 	uint64_t qual;
   1123 
   1124 	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);
   1125 
   1126 	if ((qual & INTR_INFO_VALID) == 0) {
   1127 		goto error;
   1128 	}
   1129 	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
   1130 		goto error;
   1131 	}
   1132 
   1133 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1134 	return;
   1135 
   1136 error:
   1137 	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
   1138 }
   1139 
   1140 static void
   1141 vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1142     uint64_t eax, uint64_t ecx)
   1143 {
   1144 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1145 	unsigned int ncpus;
   1146 	uint64_t cr4;
   1147 
   1148 	switch (eax) {
   1149 	case 0x00000001:
   1150 		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;
   1151 
   1152 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
   1153 		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
   1154 		    CPUID_LOCAL_APIC_ID);
   1155 
   1156 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
   1157 		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
   1158 		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
   1159 			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_PCID;
   1160 		}
   1161 
   1162 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;
   1163 
   1164 		/* CPUID2_OSXSAVE depends on CR4. */
   1165 		cr4 = vmx_vmread(VMCS_GUEST_CR4);
   1166 		if (!(cr4 & CR4_OSXSAVE)) {
   1167 			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
   1168 		}
   1169 		break;
   1170 	case 0x00000005:
   1171 	case 0x00000006:
   1172 		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
   1173 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1174 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
   1175 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1176 		break;
   1177 	case 0x00000007:
   1178 		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000007.eax;
   1179 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
   1180 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
   1181 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
   1182 		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
   1183 			cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
   1184 		}
   1185 		break;
   1186 	case 0x0000000A:
   1187 		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
   1188 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1189 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
   1190 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1191 		break;
   1192 	case 0x0000000B:
   1193 		switch (ecx) {
   1194 		case 0: /* Threads */
   1195 			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
   1196 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1197 			cpudata->gprs[NVMM_X64_GPR_RCX] =
   1198 			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
   1199 			    __SHIFTIN(CPUID_TOP_LVLTYPE_SMT, CPUID_TOP_LVLTYPE);
   1200 			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
   1201 			break;
   1202 		case 1: /* Cores */
   1203 			ncpus = atomic_load_relaxed(&mach->ncpus);
   1204 			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
   1205 			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
   1206 			cpudata->gprs[NVMM_X64_GPR_RCX] =
   1207 			    __SHIFTIN(ecx, CPUID_TOP_LVLNUM) |
   1208 			    __SHIFTIN(CPUID_TOP_LVLTYPE_CORE, CPUID_TOP_LVLTYPE);
   1209 			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
   1210 			break;
   1211 		default:
   1212 			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
   1213 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1214 			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
   1215 			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1216 			break;
   1217 		}
   1218 		break;
   1219 	case 0x0000000D:
   1220 		if (vmx_xcr0_mask == 0) {
   1221 			break;
   1222 		}
   1223 		switch (ecx) {
   1224 		case 0:
   1225 			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
   1226 			if (cpudata->gxcr0 & XCR0_SSE) {
   1227 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
   1228 			} else {
   1229 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
   1230 			}
   1231 			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
   1232 			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
   1233 			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
   1234 			break;
   1235 		case 1:
   1236 			cpudata->gprs[NVMM_X64_GPR_RAX] &=
   1237 			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
   1238 			     CPUID_PES1_XGETBV);
   1239 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1240 			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
   1241 			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1242 			break;
   1243 		default:
   1244 			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
   1245 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1246 			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
   1247 			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1248 			break;
   1249 		}
   1250 		break;
   1251 	case 0x40000000:
   1252 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
   1253 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
   1254 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
   1255 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
   1256 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
   1257 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
   1258 		break;
   1259 	case 0x80000001:
   1260 		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
   1261 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
   1262 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
   1263 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
   1264 		break;
   1265 	default:
   1266 		break;
   1267 	}
   1268 }
   1269 
   1270 static void
   1271 vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
   1272 {
   1273 	uint64_t inslen, rip;
   1274 
   1275 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1276 	rip = vmx_vmread(VMCS_GUEST_RIP);
   1277 	exit->u.insn.npc = rip + inslen;
   1278 	exit->reason = reason;
   1279 }
   1280 
   1281 static void
   1282 vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1283     struct nvmm_vcpu_exit *exit)
   1284 {
   1285 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1286 	struct nvmm_vcpu_conf_cpuid *cpuid;
   1287 	uint64_t eax, ecx;
   1288 	u_int descs[4];
   1289 	size_t i;
   1290 
   1291 	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
   1292 	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
   1293 	x86_cpuid2(eax, ecx, descs);
   1294 
   1295 	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
   1296 	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
   1297 	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
   1298 	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
   1299 
   1300 	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);
   1301 
   1302 	for (i = 0; i < VMX_NCPUIDS; i++) {
   1303 		if (!cpudata->cpuidpresent[i]) {
   1304 			continue;
   1305 		}
   1306 		cpuid = &cpudata->cpuid[i];
   1307 		if (cpuid->leaf != eax) {
   1308 			continue;
   1309 		}
   1310 
   1311 		if (cpuid->exit) {
   1312 			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
   1313 			return;
   1314 		}
   1315 		KASSERT(cpuid->mask);
   1316 
   1317 		/* del */
   1318 		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
   1319 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
   1320 		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
   1321 		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;
   1322 
   1323 		/* set */
   1324 		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
   1325 		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
   1326 		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
   1327 		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;
   1328 
   1329 		break;
   1330 	}
   1331 
   1332 	vmx_inkernel_advance();
   1333 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1334 }
   1335 
   1336 static void
   1337 vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1338     struct nvmm_vcpu_exit *exit)
   1339 {
   1340 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1341 	uint64_t rflags;
   1342 
   1343 	if (cpudata->int_window_exit) {
   1344 		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
   1345 		if (rflags & PSL_I) {
   1346 			vmx_event_waitexit_disable(vcpu, false);
   1347 		}
   1348 	}
   1349 
   1350 	vmx_inkernel_advance();
   1351 	exit->reason = NVMM_VCPU_EXIT_HALTED;
   1352 }
   1353 
   1354 #define VMX_QUAL_CR_NUM		__BITS(3,0)
   1355 #define VMX_QUAL_CR_TYPE	__BITS(5,4)
   1356 #define		CR_TYPE_WRITE	0
   1357 #define		CR_TYPE_READ	1
   1358 #define		CR_TYPE_CLTS	2
   1359 #define		CR_TYPE_LMSW	3
   1360 #define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
   1361 #define VMX_QUAL_CR_GPR		__BITS(11,8)
   1362 #define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)
   1363 
   1364 static inline int
   1365 vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
   1366 {
   1367 	/* Bits set to 1 in fixed0 are fixed to 1. */
   1368 	if ((crval & fixed0) != fixed0) {
   1369 		return -1;
   1370 	}
   1371 	/* Bits set to 0 in fixed1 are fixed to 0. */
   1372 	if (crval & ~fixed1) {
   1373 		return -1;
   1374 	}
   1375 	return 0;
   1376 }
   1377 
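/*
 * Worked example (illustrative): suppose fixed0 = CR0_NE and fixed1 has
 * every bit set except CR0_CD.  A candidate CR0 lacking CR0_NE fails the
 * first test (a fixed-to-1 bit is clear); a candidate with CR0_CD set
 * fails the second (a fixed-to-0 bit is set).  Callers reject such a
 * write, which ends up injecting #GP:
 *
 *	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1)
 *		return -1;
 */
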
   1378 static int
   1379 vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1380     uint64_t qual)
   1381 {
   1382 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1383 	uint64_t type, gpr, cr0;
   1384 	uint64_t efer, ctls1;
   1385 
   1386 	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
   1387 	if (type != CR_TYPE_WRITE) {
   1388 		return -1;
   1389 	}
   1390 
   1391 	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
   1392 	KASSERT(gpr < 16);
   1393 
   1394 	if (gpr == NVMM_X64_GPR_RSP) {
   1395 		gpr = vmx_vmread(VMCS_GUEST_RSP);
   1396 	} else {
   1397 		gpr = cpudata->gprs[gpr];
   1398 	}
   1399 
   1400 	cr0 = gpr | CR0_NE | CR0_ET;
   1401 	cr0 &= ~(CR0_NW|CR0_CD);
   1402 
   1403 	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
   1404 		return -1;
   1405 	}
   1406 
   1407 	/*
   1408 	 * XXX Handle 32bit PAE paging: we would need to load the four
   1409 	 * guest PDPTEs into the VMCS, fetched manually from CR3.
   1410 	 */
   1411 
   1412 	if (cr0 & CR0_PG) {
   1413 		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
   1414 		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
   1415 		if (efer & EFER_LME) {
   1416 			ctls1 |= ENTRY_CTLS_LONG_MODE;
   1417 			efer |= EFER_LMA;
   1418 		} else {
   1419 			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
   1420 			efer &= ~EFER_LMA;
   1421 		}
   1422 		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
   1423 		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
   1424 	}
   1425 
   1426 	vmx_vmwrite(VMCS_GUEST_CR0, cr0);
   1427 	vmx_inkernel_advance();
   1428 	return 0;
   1429 }
   1430 
   1431 static int
   1432 vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1433     uint64_t qual)
   1434 {
   1435 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1436 	uint64_t type, gpr, cr4;
   1437 
   1438 	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
   1439 	if (type != CR_TYPE_WRITE) {
   1440 		return -1;
   1441 	}
   1442 
   1443 	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
   1444 	KASSERT(gpr < 16);
   1445 
   1446 	if (gpr == NVMM_X64_GPR_RSP) {
   1447 		gpr = vmx_vmread(VMCS_GUEST_RSP);
   1448 	} else {
   1449 		gpr = cpudata->gprs[gpr];
   1450 	}
   1451 
   1452 	cr4 = gpr | CR4_VMXE;
   1453 
   1454 	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
   1455 		return -1;
   1456 	}
   1457 
   1458 	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
   1459 	vmx_inkernel_advance();
   1460 	return 0;
   1461 }
   1462 
   1463 static int
   1464 vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1465     uint64_t qual, struct nvmm_vcpu_exit *exit)
   1466 {
   1467 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1468 	uint64_t type, gpr;
   1469 	bool write;
   1470 
   1471 	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
   1472 	if (type == CR_TYPE_WRITE) {
   1473 		write = true;
   1474 	} else if (type == CR_TYPE_READ) {
   1475 		write = false;
   1476 	} else {
   1477 		return -1;
   1478 	}
   1479 
   1480 	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
   1481 	KASSERT(gpr < 16);
   1482 
   1483 	if (write) {
   1484 		if (gpr == NVMM_X64_GPR_RSP) {
   1485 			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
   1486 		} else {
   1487 			cpudata->gcr8 = cpudata->gprs[gpr];
   1488 		}
   1489 		if (cpudata->tpr.exit_changed) {
   1490 			exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
   1491 		}
   1492 	} else {
   1493 		if (gpr == NVMM_X64_GPR_RSP) {
   1494 			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
   1495 		} else {
   1496 			cpudata->gprs[gpr] = cpudata->gcr8;
   1497 		}
   1498 	}
   1499 
   1500 	vmx_inkernel_advance();
   1501 	return 0;
   1502 }
   1503 
   1504 static void
   1505 vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1506     struct nvmm_vcpu_exit *exit)
   1507 {
   1508 	uint64_t qual;
   1509 	int ret;
   1510 
   1511 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1512 
   1513 	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
   1514 
   1515 	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
   1516 	case 0:
   1517 		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
   1518 		break;
   1519 	case 4:
   1520 		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
   1521 		break;
   1522 	case 8:
   1523 		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
   1524 		break;
   1525 	default:
   1526 		ret = -1;
   1527 		break;
   1528 	}
   1529 
   1530 	if (ret == -1) {
   1531 		vmx_inject_gp(vcpu);
   1532 	}
   1533 }
   1534 
   1535 #define VMX_QUAL_IO_SIZE	__BITS(2,0)
   1536 #define		IO_SIZE_8	0
   1537 #define		IO_SIZE_16	1
   1538 #define		IO_SIZE_32	3
   1539 #define VMX_QUAL_IO_IN		__BIT(3)
   1540 #define VMX_QUAL_IO_STR		__BIT(4)
   1541 #define VMX_QUAL_IO_REP		__BIT(5)
   1542 #define VMX_QUAL_IO_DX		__BIT(6)
   1543 #define VMX_QUAL_IO_PORT	__BITS(31,16)
   1544 
   1545 #define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
   1546 #define		IO_ADRSIZE_16	0
   1547 #define		IO_ADRSIZE_32	1
   1548 #define		IO_ADRSIZE_64	2
   1549 #define VMX_INFO_IO_SEG		__BITS(17,15)
   1550 
   1551 static void
   1552 vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1553     struct nvmm_vcpu_exit *exit)
   1554 {
   1555 	uint64_t qual, info, inslen, rip;
   1556 
   1557 	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
   1558 	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);
   1559 
   1560 	exit->reason = NVMM_VCPU_EXIT_IO;
   1561 
   1562 	exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
   1563 	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);
   1564 
   1565 	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
   1566 	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);
   1567 
   1568 	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
   1569 		exit->u.io.address_size = 8;
   1570 	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
   1571 		exit->u.io.address_size = 4;
   1572 	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
   1573 		exit->u.io.address_size = 2;
   1574 	}
   1575 
   1576 	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
   1577 		exit->u.io.operand_size = 4;
   1578 	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
   1579 		exit->u.io.operand_size = 2;
   1580 	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
   1581 		exit->u.io.operand_size = 1;
   1582 	}
   1583 
   1584 	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
   1585 	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;
   1586 
   1587 	if (exit->u.io.in && exit->u.io.str) {
   1588 		exit->u.io.seg = NVMM_X64_SEG_ES;
   1589 	}
   1590 
   1591 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1592 	rip = vmx_vmread(VMCS_GUEST_RIP);
   1593 	exit->u.io.npc = rip + inslen;
   1594 
   1595 	vmx_vcpu_state_provide(vcpu,
   1596 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1597 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1598 }
   1599 
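/*
 * MSRs in the list below are silently ignored: reads return zero and
 * writes are discarded.  Guests commonly probe them at boot.
 */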
   1600 static const uint64_t msr_ignore_list[] = {
   1601 	MSR_BIOS_SIGN,
   1602 	MSR_IA32_PLATFORM_ID
   1603 };
   1604 
   1605 static bool
   1606 vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1607     struct nvmm_vcpu_exit *exit)
   1608 {
   1609 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1610 	uint64_t val;
   1611 	size_t i;
   1612 
   1613 	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
   1614 		if (exit->u.rdmsr.msr == MSR_CR_PAT) {
   1615 			val = vmx_vmread(VMCS_GUEST_IA32_PAT);
   1616 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
   1617 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1618 			goto handled;
   1619 		}
   1620 		if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
   1621 			val = cpudata->gmsr_misc_enable;
   1622 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
   1623 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1624 			goto handled;
   1625 		}
   1626 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1627 			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
   1628 				continue;
   1629 			val = 0;
   1630 			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
   1631 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1632 			goto handled;
   1633 		}
   1634 	} else {
   1635 		if (exit->u.wrmsr.msr == MSR_TSC) {
   1636 			cpudata->gtsc = exit->u.wrmsr.val;
   1637 			cpudata->gtsc_want_update = true;
   1638 			goto handled;
   1639 		}
   1640 		if (exit->u.wrmsr.msr == MSR_CR_PAT) {
   1641 			val = exit->u.wrmsr.val;
   1642 			if (__predict_false(!nvmm_x86_pat_validate(val))) {
   1643 				goto error;
   1644 			}
   1645 			vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
   1646 			goto handled;
   1647 		}
   1648 		if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
   1649 			/* Don't care. */
   1650 			goto handled;
   1651 		}
   1652 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1653 			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
   1654 				continue;
   1655 			goto handled;
   1656 		}
   1657 	}
   1658 
   1659 	return false;
   1660 
   1661 handled:
   1662 	vmx_inkernel_advance();
   1663 	return true;
   1664 
   1665 error:
   1666 	vmx_inject_gp(vcpu);
   1667 	return true;
   1668 }
   1669 
   1670 static void
   1671 vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1672     struct nvmm_vcpu_exit *exit)
   1673 {
   1674 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1675 	uint64_t inslen, rip;
   1676 
   1677 	exit->reason = NVMM_VCPU_EXIT_RDMSR;
   1678 	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1679 
   1680 	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
   1681 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1682 		return;
   1683 	}
   1684 
   1685 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1686 	rip = vmx_vmread(VMCS_GUEST_RIP);
   1687 	exit->u.rdmsr.npc = rip + inslen;
   1688 
   1689 	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1690 }
   1691 
   1692 static void
   1693 vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1694     struct nvmm_vcpu_exit *exit)
   1695 {
   1696 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1697 	uint64_t rdx, rax, inslen, rip;
   1698 
   1699 	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
   1700 	rax = cpudata->gprs[NVMM_X64_GPR_RAX];
   1701 
   1702 	exit->reason = NVMM_VCPU_EXIT_WRMSR;
   1703 	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1704 	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
   1705 
   1706 	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
   1707 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1708 		return;
   1709 	}
   1710 
   1711 	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1712 	rip = vmx_vmread(VMCS_GUEST_RIP);
   1713 	exit->u.wrmsr.npc = rip + inslen;
   1714 
   1715 	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1716 }
   1717 
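/*
 * XSETBV. The guest must target XCR0 (%rcx == 0), must not set bits
 * beyond what the host allows, and must keep the x87 bit set; any
 * violation results in a #GP injection.
 */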
   1718 static void
   1719 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1720     struct nvmm_vcpu_exit *exit)
   1721 {
   1722 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1723 	uint64_t val;
   1724 
   1725 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1726 
   1727 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
   1728 	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
   1729 
   1730 	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
   1731 		goto error;
   1732 	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
   1733 		goto error;
   1734 	} else if (__predict_false((val & XCR0_X87) == 0)) {
   1735 		goto error;
   1736 	}
   1737 
   1738 	cpudata->gxcr0 = val;
   1739 	if (vmx_xcr0_mask != 0) {
   1740 		wrxcr(0, cpudata->gxcr0);
   1741 	}
   1742 
   1743 	vmx_inkernel_advance();
   1744 	return;
   1745 
   1746 error:
   1747 	vmx_inject_gp(vcpu);
   1748 }
   1749 
   1750 #define VMX_EPT_VIOLATION_READ		__BIT(0)
   1751 #define VMX_EPT_VIOLATION_WRITE		__BIT(1)
   1752 #define VMX_EPT_VIOLATION_EXECUTE	__BIT(2)
   1753 
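/*
 * EPT violation: forwarded to the emulator as a memory exit, with the
 * fault reduced to a single missing permission, preferring write over
 * execute over read.
 */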
   1754 static void
   1755 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1756     struct nvmm_vcpu_exit *exit)
   1757 {
   1758 	uint64_t perm;
   1759 	gpaddr_t gpa;
   1760 
   1761 	gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
   1762 
   1763 	exit->reason = NVMM_VCPU_EXIT_MEMORY;
   1764 	perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
   1765 	if (perm & VMX_EPT_VIOLATION_WRITE)
   1766 		exit->u.mem.prot = PROT_WRITE;
   1767 	else if (perm & VMX_EPT_VIOLATION_EXECUTE)
   1768 		exit->u.mem.prot = PROT_EXEC;
   1769 	else
   1770 		exit->u.mem.prot = PROT_READ;
   1771 	exit->u.mem.gpa = gpa;
   1772 	exit->u.mem.inst_len = 0;
   1773 
   1774 	vmx_vcpu_state_provide(vcpu,
   1775 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1776 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1777 }
   1778 
   1779 /* -------------------------------------------------------------------------- */
   1780 
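/*
 * Swap the FPU context: save the host state and install the guest state,
 * exchanging XCR0 as well when XSAVE is in use. The reverse is done in
 * vmx_vcpu_guest_fpu_leave() below.
 */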
   1781 static void
   1782 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
   1783 {
   1784 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1785 
   1786 	fpu_save();
   1787 	fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
   1788 
   1789 	if (vmx_xcr0_mask != 0) {
   1790 		cpudata->hxcr0 = rdxcr(0);
   1791 		wrxcr(0, cpudata->gxcr0);
   1792 	}
   1793 }
   1794 
   1795 static void
   1796 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
   1797 {
   1798 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1799 
   1800 	if (vmx_xcr0_mask != 0) {
   1801 		cpudata->gxcr0 = rdxcr(0);
   1802 		wrxcr(0, cpudata->hxcr0);
   1803 	}
   1804 
   1805 	fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
   1806 }
   1807 
   1808 static void
   1809 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
   1810 {
   1811 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1812 
   1813 	x86_dbregs_save(curlwp);
   1814 
   1815 	ldr7(0);
   1816 
   1817 	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
   1818 	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
   1819 	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
   1820 	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
   1821 	ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
   1822 }
   1823 
   1824 static void
   1825 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
   1826 {
   1827 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1828 
   1829 	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
   1830 	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
   1831 	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
   1832 	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
   1833 	cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
   1834 
   1835 	x86_dbregs_restore(curlwp);
   1836 }
   1837 
   1838 static void
   1839 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
   1840 {
   1841 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1842 
    1843 	/* These host values get restored automatically by the CPU on #VMEXIT. */
   1844 	vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
   1845 	vmx_vmwrite(VMCS_HOST_CR3, rcr3());
   1846 	vmx_vmwrite(VMCS_HOST_CR4, rcr4());
   1847 
   1848 	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
   1849 }
   1850 
   1851 static void
   1852 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
   1853 {
   1854 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1855 
   1856 	wrmsr(MSR_STAR, cpudata->star);
   1857 	wrmsr(MSR_LSTAR, cpudata->lstar);
   1858 	wrmsr(MSR_CSTAR, cpudata->cstar);
   1859 	wrmsr(MSR_SFMASK, cpudata->sfmask);
   1860 	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
   1861 }
   1862 
   1863 /* -------------------------------------------------------------------------- */
   1864 
   1865 #define VMX_INVVPID_ADDRESS		0
   1866 #define VMX_INVVPID_CONTEXT		1
   1867 #define VMX_INVVPID_ALL			2
   1868 #define VMX_INVVPID_CONTEXT_NOGLOBAL	3
   1869 
   1870 #define VMX_INVEPT_CONTEXT		1
   1871 #define VMX_INVEPT_ALL			2
   1872 
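/*
 * TLB bookkeeping. Two TLBs are tracked: the guest TLB (VPID-tagged
 * translations), flushed with INVVPID when the VCPU migrates to another
 * physical CPU or when newly installed state invalidates it; and the
 * host TLB (EPT translations), flushed with INVEPT and synchronized via
 * the machine-wide generation number mach_htlb_gen. A VCPU whose
 * vcpu_htlb_gen lags behind performs the flush itself and marks the
 * other CPUs as wanting one.
 */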
   1873 static inline void
   1874 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1875 {
   1876 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1877 
   1878 	if (vcpu->hcpu_last != hcpu) {
   1879 		cpudata->gtlb_want_flush = true;
   1880 	}
   1881 }
   1882 
   1883 static inline void
   1884 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1885 {
   1886 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1887 	struct ept_desc ept_desc;
   1888 
   1889 	if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
   1890 		return;
   1891 	}
   1892 
   1893 	ept_desc.eptp = vmx_vmread(VMCS_EPTP);
   1894 	ept_desc.mbz = 0;
   1895 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1896 	kcpuset_clear(cpudata->htlb_want_flush, hcpu);
   1897 }
   1898 
   1899 static inline uint64_t
   1900 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
   1901 {
   1902 	struct ept_desc ept_desc;
   1903 	uint64_t machgen;
   1904 
   1905 	machgen = machdata->mach_htlb_gen;
   1906 	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
   1907 		return machgen;
   1908 	}
   1909 
   1910 	kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
   1911 
   1912 	ept_desc.eptp = vmx_vmread(VMCS_EPTP);
   1913 	ept_desc.mbz = 0;
   1914 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1915 
   1916 	return machgen;
   1917 }
   1918 
   1919 static inline void
   1920 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
   1921 {
   1922 	cpudata->vcpu_htlb_gen = machgen;
   1923 	kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
   1924 }
   1925 
   1926 static inline void
   1927 vmx_exit_evt(struct vmx_cpudata *cpudata)
   1928 {
   1929 	uint64_t info, err, inslen;
   1930 
   1931 	cpudata->evt_pending = false;
   1932 
   1933 	info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
   1934 	if (__predict_true((info & INTR_INFO_VALID) == 0)) {
   1935 		return;
   1936 	}
   1937 	err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
   1938 
   1939 	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
   1940 	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
   1941 
   1942 	switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
   1943 	case INTR_TYPE_SW_INT:
   1944 	case INTR_TYPE_PRIV_SW_EXC:
   1945 	case INTR_TYPE_SW_EXC:
   1946 		inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
   1947 		vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
   1948 	}
   1949 
   1950 	cpudata->evt_pending = true;
   1951 }
   1952 
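/*
 * Run the VCPU: commit the pending event and state, then loop on
 * VMLAUNCH/VMRESUME. Exits that can be handled in the kernel keep the
 * loop rolling; we break out to userland when an exit needs the
 * emulator, when preemption is needed, or when the LWP has work to do.
 */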
   1953 static int
   1954 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1955     struct nvmm_vcpu_exit *exit)
   1956 {
   1957 	struct nvmm_comm_page *comm = vcpu->comm;
   1958 	struct vmx_machdata *machdata = mach->machdata;
   1959 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1960 	struct vpid_desc vpid_desc;
   1961 	struct cpu_info *ci;
   1962 	uint64_t exitcode;
   1963 	uint64_t intstate;
   1964 	uint64_t machgen;
   1965 	int hcpu, s, ret;
   1966 	bool launched;
   1967 
   1968 	vmx_vmcs_enter(vcpu);
   1969 
   1970 	if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
   1971 		vmx_vmcs_leave(vcpu);
   1972 		return EINVAL;
   1973 	}
   1974 	vmx_vcpu_state_commit(vcpu);
   1975 	comm->state_cached = 0;
   1976 
   1977 	ci = curcpu();
   1978 	hcpu = cpu_number();
   1979 	launched = cpudata->vmcs_launched;
   1980 
   1981 	vmx_gtlb_catchup(vcpu, hcpu);
   1982 	vmx_htlb_catchup(vcpu, hcpu);
   1983 
   1984 	if (vcpu->hcpu_last != hcpu) {
   1985 		vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
   1986 		vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
   1987 		vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
   1988 		vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
   1989 		cpudata->gtsc_want_update = true;
   1990 		vcpu->hcpu_last = hcpu;
   1991 	}
   1992 
   1993 	vmx_vcpu_guest_dbregs_enter(vcpu);
   1994 	vmx_vcpu_guest_misc_enter(vcpu);
   1995 	vmx_vcpu_guest_fpu_enter(vcpu);
   1996 
   1997 	while (1) {
   1998 		if (cpudata->gtlb_want_flush) {
   1999 			vpid_desc.vpid = cpudata->asid;
   2000 			vpid_desc.addr = 0;
   2001 			vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
   2002 			cpudata->gtlb_want_flush = false;
   2003 		}
   2004 
   2005 		if (__predict_false(cpudata->gtsc_want_update)) {
   2006 			vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
   2007 			cpudata->gtsc_want_update = false;
   2008 		}
   2009 
   2010 		s = splhigh();
   2011 		machgen = vmx_htlb_flush(machdata, cpudata);
   2012 		lcr2(cpudata->gcr2);
   2013 		if (launched) {
   2014 			ret = vmx_vmresume(cpudata->gprs);
   2015 		} else {
   2016 			ret = vmx_vmlaunch(cpudata->gprs);
   2017 		}
   2018 		cpudata->gcr2 = rcr2();
   2019 		vmx_htlb_flush_ack(cpudata, machgen);
   2020 		splx(s);
   2021 
   2022 		if (__predict_false(ret != 0)) {
   2023 			vmx_exit_invalid(exit, -1);
   2024 			break;
   2025 		}
   2026 		vmx_exit_evt(cpudata);
   2027 
   2028 		launched = true;
   2029 
   2030 		exitcode = vmx_vmread(VMCS_EXIT_REASON);
   2031 		exitcode &= __BITS(15,0);
   2032 
   2033 		switch (exitcode) {
   2034 		case VMCS_EXITCODE_EXC_NMI:
   2035 			vmx_exit_exc_nmi(mach, vcpu, exit);
   2036 			break;
   2037 		case VMCS_EXITCODE_EXT_INT:
   2038 			exit->reason = NVMM_VCPU_EXIT_NONE;
   2039 			break;
   2040 		case VMCS_EXITCODE_CPUID:
   2041 			vmx_exit_cpuid(mach, vcpu, exit);
   2042 			break;
   2043 		case VMCS_EXITCODE_HLT:
   2044 			vmx_exit_hlt(mach, vcpu, exit);
   2045 			break;
   2046 		case VMCS_EXITCODE_CR:
   2047 			vmx_exit_cr(mach, vcpu, exit);
   2048 			break;
   2049 		case VMCS_EXITCODE_IO:
   2050 			vmx_exit_io(mach, vcpu, exit);
   2051 			break;
   2052 		case VMCS_EXITCODE_RDMSR:
   2053 			vmx_exit_rdmsr(mach, vcpu, exit);
   2054 			break;
   2055 		case VMCS_EXITCODE_WRMSR:
   2056 			vmx_exit_wrmsr(mach, vcpu, exit);
   2057 			break;
   2058 		case VMCS_EXITCODE_SHUTDOWN:
   2059 			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
   2060 			break;
   2061 		case VMCS_EXITCODE_MONITOR:
   2062 			vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
   2063 			break;
   2064 		case VMCS_EXITCODE_MWAIT:
   2065 			vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
   2066 			break;
   2067 		case VMCS_EXITCODE_XSETBV:
   2068 			vmx_exit_xsetbv(mach, vcpu, exit);
   2069 			break;
   2070 		case VMCS_EXITCODE_RDPMC:
   2071 		case VMCS_EXITCODE_RDTSCP:
   2072 		case VMCS_EXITCODE_INVVPID:
   2073 		case VMCS_EXITCODE_INVEPT:
   2074 		case VMCS_EXITCODE_VMCALL:
   2075 		case VMCS_EXITCODE_VMCLEAR:
   2076 		case VMCS_EXITCODE_VMLAUNCH:
   2077 		case VMCS_EXITCODE_VMPTRLD:
   2078 		case VMCS_EXITCODE_VMPTRST:
   2079 		case VMCS_EXITCODE_VMREAD:
   2080 		case VMCS_EXITCODE_VMRESUME:
   2081 		case VMCS_EXITCODE_VMWRITE:
   2082 		case VMCS_EXITCODE_VMXOFF:
   2083 		case VMCS_EXITCODE_VMXON:
   2084 			vmx_inject_ud(vcpu);
   2085 			exit->reason = NVMM_VCPU_EXIT_NONE;
   2086 			break;
   2087 		case VMCS_EXITCODE_EPT_VIOLATION:
   2088 			vmx_exit_epf(mach, vcpu, exit);
   2089 			break;
   2090 		case VMCS_EXITCODE_INT_WINDOW:
   2091 			vmx_event_waitexit_disable(vcpu, false);
   2092 			exit->reason = NVMM_VCPU_EXIT_INT_READY;
   2093 			break;
   2094 		case VMCS_EXITCODE_NMI_WINDOW:
   2095 			vmx_event_waitexit_disable(vcpu, true);
   2096 			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
   2097 			break;
   2098 		default:
   2099 			vmx_exit_invalid(exit, exitcode);
   2100 			break;
   2101 		}
   2102 
   2103 		/* If no reason to return to userland, keep rolling. */
   2104 		if (preempt_needed()) {
   2105 			break;
   2106 		}
   2107 		if (curlwp->l_flag & LW_USERRET) {
   2108 			break;
   2109 		}
   2110 		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
   2111 			break;
   2112 		}
   2113 	}
   2114 
   2115 	cpudata->vmcs_launched = launched;
   2116 
   2117 	cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
   2118 
   2119 	vmx_vcpu_guest_fpu_leave(vcpu);
   2120 	vmx_vcpu_guest_misc_leave(vcpu);
   2121 	vmx_vcpu_guest_dbregs_leave(vcpu);
   2122 
   2123 	exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
   2124 	exit->exitstate.cr8 = cpudata->gcr8;
   2125 	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2126 	exit->exitstate.int_shadow =
   2127 	    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   2128 	exit->exitstate.int_window_exiting = cpudata->int_window_exit;
   2129 	exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
   2130 	exit->exitstate.evt_pending = cpudata->evt_pending;
   2131 
   2132 	vmx_vmcs_leave(vcpu);
   2133 
   2134 	return 0;
   2135 }
   2136 
   2137 /* -------------------------------------------------------------------------- */
   2138 
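/*
 * Allocate physically contiguous, wired and zeroed pages, mapped into
 * kernel virtual space. Used for the structures handed directly to the
 * CPU: the VMCS, the MSR bitmap, the MSR lists and the VMXON regions.
 */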
   2139 static int
   2140 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
   2141 {
   2142 	struct pglist pglist;
   2143 	paddr_t _pa;
   2144 	vaddr_t _va;
   2145 	size_t i;
   2146 	int ret;
   2147 
   2148 	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
   2149 	    &pglist, 1, 0);
   2150 	if (ret != 0)
   2151 		return ENOMEM;
   2152 	_pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
   2153 	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
   2154 	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
   2155 	if (_va == 0)
   2156 		goto error;
   2157 
   2158 	for (i = 0; i < npages; i++) {
   2159 		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
   2160 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
   2161 	}
   2162 	pmap_update(pmap_kernel());
   2163 
   2164 	memset((void *)_va, 0, npages * PAGE_SIZE);
   2165 
   2166 	*pa = _pa;
   2167 	*va = _va;
   2168 	return 0;
   2169 
   2170 error:
   2171 	for (i = 0; i < npages; i++) {
   2172 		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
   2173 	}
   2174 	return ENOMEM;
   2175 }
   2176 
   2177 static void
   2178 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
   2179 {
   2180 	size_t i;
   2181 
   2182 	pmap_kremove(va, npages * PAGE_SIZE);
   2183 	pmap_update(pmap_kernel());
   2184 	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
   2185 	for (i = 0; i < npages; i++) {
   2186 		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
   2187 	}
   2188 }
   2189 
   2190 /* -------------------------------------------------------------------------- */
   2191 
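/*
 * The 4KB MSR bitmap: the first two 1KB chunks control reads in the
 * ranges [0x0, 0x1FFF] and [0xC0000000, 0xC0001FFF], the next two
 * control writes in the same two ranges. A clear bit means the access
 * is not intercepted. For example, MSR_LSTAR (0xC0000082) lands at
 * read byte (0x82 / 8) + 1024 = 1040, bit 2.
 */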
   2192 static void
   2193 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
   2194 {
   2195 	uint64_t byte;
   2196 	uint8_t bitoff;
   2197 
   2198 	if (msr < 0x00002000) {
   2199 		/* Range 1 */
   2200 		byte = ((msr - 0x00000000) / 8) + 0;
   2201 	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
   2202 		/* Range 2 */
   2203 		byte = ((msr - 0xC0000000) / 8) + 1024;
   2204 	} else {
   2205 		panic("%s: wrong range", __func__);
   2206 	}
   2207 
   2208 	bitoff = (msr & 0x7);
   2209 
   2210 	if (read) {
   2211 		bitmap[byte] &= ~__BIT(bitoff);
   2212 	}
   2213 	if (write) {
   2214 		bitmap[2048 + byte] &= ~__BIT(bitoff);
   2215 	}
   2216 }
   2217 
   2218 #define VMX_SEG_ATTRIB_TYPE		__BITS(3,0)
   2219 #define VMX_SEG_ATTRIB_S		__BIT(4)
   2220 #define VMX_SEG_ATTRIB_DPL		__BITS(6,5)
   2221 #define VMX_SEG_ATTRIB_P		__BIT(7)
   2222 #define VMX_SEG_ATTRIB_AVL		__BIT(12)
   2223 #define VMX_SEG_ATTRIB_L		__BIT(13)
   2224 #define VMX_SEG_ATTRIB_DEF		__BIT(14)
   2225 #define VMX_SEG_ATTRIB_G		__BIT(15)
   2226 #define VMX_SEG_ATTRIB_UNUSABLE		__BIT(16)
   2227 
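/*
 * Conversion between the NVMM segment attributes and the packed VMCS
 * access-rights format. A segment with P=0 must be marked unusable, and
 * GDT/IDT have no selector or attribute fields in the VMCS.
 */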
   2228 static void
   2229 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
   2230 {
   2231 	uint64_t attrib;
   2232 
   2233 	attrib =
   2234 	    __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
   2235 	    __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
   2236 	    __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
   2237 	    __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
   2238 	    __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
   2239 	    __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
   2240 	    __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
   2241 	    __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
   2242 	    (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
   2243 
   2244 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   2245 		vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
   2246 		vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
   2247 	}
   2248 	vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
   2249 	vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
   2250 }
   2251 
   2252 static void
   2253 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
   2254 {
   2255 	uint64_t selector = 0, attrib = 0, base, limit;
   2256 
   2257 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   2258 		selector = vmx_vmread(vmx_guest_segs[idx].selector);
   2259 		attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
   2260 	}
   2261 	limit = vmx_vmread(vmx_guest_segs[idx].limit);
   2262 	base = vmx_vmread(vmx_guest_segs[idx].base);
   2263 
   2264 	segs[idx].selector = selector;
   2265 	segs[idx].limit = limit;
   2266 	segs[idx].base = base;
   2267 	segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
   2268 	segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
   2269 	segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
   2270 	segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
   2271 	segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
   2272 	segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
   2273 	segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
   2274 	segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
   2275 	if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
   2276 		segs[idx].attrib.p = 0;
   2277 	}
   2278 }
   2279 
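/*
 * Whether installing the new state invalidates the guest TLB: true if a
 * CR0, CR4 or EFER bit that affects address translation (as selected by
 * the *_TLB_FLUSH masks) changes, or if CR3 changes at all.
 */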
   2280 static inline bool
   2281 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
   2282 {
   2283 	uint64_t cr0, cr3, cr4, efer;
   2284 
   2285 	if (flags & NVMM_X64_STATE_CRS) {
   2286 		cr0 = vmx_vmread(VMCS_GUEST_CR0);
   2287 		if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
   2288 			return true;
   2289 		}
   2290 		cr3 = vmx_vmread(VMCS_GUEST_CR3);
   2291 		if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
   2292 			return true;
   2293 		}
   2294 		cr4 = vmx_vmread(VMCS_GUEST_CR4);
   2295 		if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
   2296 			return true;
   2297 		}
   2298 	}
   2299 
   2300 	if (flags & NVMM_X64_STATE_MSRS) {
   2301 		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
   2302 		if ((efer ^
   2303 		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
   2304 			return true;
   2305 		}
   2306 	}
   2307 
   2308 	return false;
   2309 }
   2310 
   2311 static void
   2312 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
   2313 {
   2314 	struct nvmm_comm_page *comm = vcpu->comm;
   2315 	const struct nvmm_x64_state *state = &comm->state;
   2316 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2317 	struct fxsave *fpustate;
   2318 	uint64_t ctls1, intstate;
   2319 	uint64_t flags;
   2320 
   2321 	flags = comm->state_wanted;
   2322 
   2323 	vmx_vmcs_enter(vcpu);
   2324 
   2325 	if (vmx_state_tlb_flush(state, flags)) {
   2326 		cpudata->gtlb_want_flush = true;
   2327 	}
   2328 
   2329 	if (flags & NVMM_X64_STATE_SEGS) {
   2330 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
   2331 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
   2332 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
   2333 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
   2334 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
   2335 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
   2336 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2337 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2338 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2339 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
   2340 	}
   2341 
   2342 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2343 	if (flags & NVMM_X64_STATE_GPRS) {
   2344 		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
   2345 
   2346 		vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
   2347 		vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
   2348 		vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
   2349 	}
   2350 
   2351 	if (flags & NVMM_X64_STATE_CRS) {
   2352 		/*
   2353 		 * CR0_NE and CR4_VMXE are mandatory.
   2354 		 */
   2355 		vmx_vmwrite(VMCS_GUEST_CR0,
   2356 		    state->crs[NVMM_X64_CR_CR0] | CR0_NE);
   2357 		cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
   2358 		vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); // XXX PDPTE?
   2359 		vmx_vmwrite(VMCS_GUEST_CR4,
   2360 		    state->crs[NVMM_X64_CR_CR4] | CR4_VMXE);
   2361 		cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
   2362 
   2363 		if (vmx_xcr0_mask != 0) {
   2364 			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
   2365 			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
   2366 			cpudata->gxcr0 &= vmx_xcr0_mask;
   2367 			cpudata->gxcr0 |= XCR0_X87;
   2368 		}
   2369 	}
   2370 
   2371 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2372 	if (flags & NVMM_X64_STATE_DRS) {
   2373 		memcpy(cpudata->drs, state->drs, sizeof(state->drs));
   2374 
   2375 		cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
   2376 		vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
   2377 	}
   2378 
   2379 	if (flags & NVMM_X64_STATE_MSRS) {
   2380 		cpudata->gmsr[VMX_MSRLIST_STAR].val =
   2381 		    state->msrs[NVMM_X64_MSR_STAR];
   2382 		cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
   2383 		    state->msrs[NVMM_X64_MSR_LSTAR];
   2384 		cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
   2385 		    state->msrs[NVMM_X64_MSR_CSTAR];
   2386 		cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
   2387 		    state->msrs[NVMM_X64_MSR_SFMASK];
   2388 		cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
   2389 		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
   2390 
   2391 		vmx_vmwrite(VMCS_GUEST_IA32_EFER,
   2392 		    state->msrs[NVMM_X64_MSR_EFER]);
   2393 		vmx_vmwrite(VMCS_GUEST_IA32_PAT,
   2394 		    state->msrs[NVMM_X64_MSR_PAT]);
   2395 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
   2396 		    state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
   2397 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
   2398 		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
   2399 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
   2400 		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
   2401 
   2402 		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
   2403 		cpudata->gtsc_want_update = true;
   2404 
   2405 		/* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
   2406 		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
   2407 		if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
   2408 			ctls1 |= ENTRY_CTLS_LONG_MODE;
   2409 		} else {
   2410 			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
   2411 		}
   2412 		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
   2413 	}
   2414 
   2415 	if (flags & NVMM_X64_STATE_INTR) {
   2416 		intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2417 		intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
   2418 		if (state->intr.int_shadow) {
   2419 			intstate |= INT_STATE_MOVSS;
   2420 		}
   2421 		vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
   2422 
   2423 		if (state->intr.int_window_exiting) {
   2424 			vmx_event_waitexit_enable(vcpu, false);
   2425 		} else {
   2426 			vmx_event_waitexit_disable(vcpu, false);
   2427 		}
   2428 
   2429 		if (state->intr.nmi_window_exiting) {
   2430 			vmx_event_waitexit_enable(vcpu, true);
   2431 		} else {
   2432 			vmx_event_waitexit_disable(vcpu, true);
   2433 		}
   2434 	}
   2435 
   2436 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2437 	if (flags & NVMM_X64_STATE_FPU) {
   2438 		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
   2439 		    sizeof(state->fpu));
   2440 
   2441 		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
   2442 		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
   2443 		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
   2444 
   2445 		if (vmx_xcr0_mask != 0) {
   2446 			/* Reset XSTATE_BV, to force a reload. */
   2447 			cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2448 		}
   2449 	}
   2450 
   2451 	vmx_vmcs_leave(vcpu);
   2452 
   2453 	comm->state_wanted = 0;
   2454 	comm->state_cached |= flags;
   2455 }
   2456 
   2457 static void
   2458 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
   2459 {
   2460 	struct nvmm_comm_page *comm = vcpu->comm;
   2461 	struct nvmm_x64_state *state = &comm->state;
   2462 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2463 	uint64_t intstate, flags;
   2464 
   2465 	flags = comm->state_wanted;
   2466 
   2467 	vmx_vmcs_enter(vcpu);
   2468 
   2469 	if (flags & NVMM_X64_STATE_SEGS) {
   2470 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
   2471 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
   2472 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
   2473 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
   2474 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
   2475 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
   2476 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2477 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2478 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2479 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
   2480 	}
   2481 
   2482 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2483 	if (flags & NVMM_X64_STATE_GPRS) {
   2484 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   2485 
   2486 		state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
   2487 		state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
   2488 		state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
   2489 	}
   2490 
   2491 	if (flags & NVMM_X64_STATE_CRS) {
   2492 		state->crs[NVMM_X64_CR_CR0] = vmx_vmread(VMCS_GUEST_CR0);
   2493 		state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
   2494 		state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
   2495 		state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
   2496 		state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
   2497 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   2498 
   2499 		/* Hide VMXE. */
   2500 		state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
   2501 	}
   2502 
   2503 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2504 	if (flags & NVMM_X64_STATE_DRS) {
   2505 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   2506 
   2507 		state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
   2508 	}
   2509 
   2510 	if (flags & NVMM_X64_STATE_MSRS) {
   2511 		state->msrs[NVMM_X64_MSR_STAR] =
   2512 		    cpudata->gmsr[VMX_MSRLIST_STAR].val;
   2513 		state->msrs[NVMM_X64_MSR_LSTAR] =
   2514 		    cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
   2515 		state->msrs[NVMM_X64_MSR_CSTAR] =
   2516 		    cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
   2517 		state->msrs[NVMM_X64_MSR_SFMASK] =
   2518 		    cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
   2519 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   2520 		    cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
   2521 		state->msrs[NVMM_X64_MSR_EFER] =
   2522 		    vmx_vmread(VMCS_GUEST_IA32_EFER);
   2523 		state->msrs[NVMM_X64_MSR_PAT] =
   2524 		    vmx_vmread(VMCS_GUEST_IA32_PAT);
   2525 		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
   2526 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
   2527 		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
   2528 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
   2529 		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
   2530 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
   2531 		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
   2532 	}
   2533 
   2534 	if (flags & NVMM_X64_STATE_INTR) {
   2535 		intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2536 		state->intr.int_shadow =
   2537 		    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   2538 		state->intr.int_window_exiting = cpudata->int_window_exit;
   2539 		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
   2540 		state->intr.evt_pending = cpudata->evt_pending;
   2541 	}
   2542 
   2543 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2544 	if (flags & NVMM_X64_STATE_FPU) {
   2545 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   2546 		    sizeof(state->fpu));
   2547 	}
   2548 
   2549 	vmx_vmcs_leave(vcpu);
   2550 
   2551 	comm->state_wanted = 0;
   2552 	comm->state_cached |= flags;
   2553 }
   2554 
   2555 static void
   2556 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
   2557 {
   2558 	vcpu->comm->state_wanted = flags;
   2559 	vmx_vcpu_getstate(vcpu);
   2560 }
   2561 
   2562 static void
   2563 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
   2564 {
   2565 	vcpu->comm->state_wanted = vcpu->comm->state_commit;
   2566 	vcpu->comm->state_commit = 0;
   2567 	vmx_vcpu_setstate(vcpu);
   2568 }
   2569 
   2570 /* -------------------------------------------------------------------------- */
   2571 
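/*
 * Allocate a VPID ("ASID") for the VCPU, with a linear scan of the
 * bitmap. ASID 0 is reserved for the host; the scan panics if every
 * VPID is already in use.
 */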
   2572 static void
   2573 vmx_asid_alloc(struct nvmm_cpu *vcpu)
   2574 {
   2575 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2576 	size_t i, oct, bit;
   2577 
   2578 	mutex_enter(&vmx_asidlock);
   2579 
   2580 	for (i = 0; i < vmx_maxasid; i++) {
   2581 		oct = i / 8;
   2582 		bit = i % 8;
   2583 
   2584 		if (vmx_asidmap[oct] & __BIT(bit)) {
   2585 			continue;
   2586 		}
   2587 
   2588 		cpudata->asid = i;
   2589 
   2590 		vmx_asidmap[oct] |= __BIT(bit);
   2591 		vmx_vmwrite(VMCS_VPID, i);
   2592 		mutex_exit(&vmx_asidlock);
   2593 		return;
   2594 	}
   2595 
   2596 	mutex_exit(&vmx_asidlock);
   2597 
   2598 	panic("%s: impossible", __func__);
   2599 }
   2600 
   2601 static void
   2602 vmx_asid_free(struct nvmm_cpu *vcpu)
   2603 {
   2604 	size_t oct, bit;
   2605 	uint64_t asid;
   2606 
   2607 	asid = vmx_vmread(VMCS_VPID);
   2608 
   2609 	oct = asid / 8;
   2610 	bit = asid % 8;
   2611 
   2612 	mutex_enter(&vmx_asidlock);
   2613 	vmx_asidmap[oct] &= ~__BIT(bit);
   2614 	mutex_exit(&vmx_asidlock);
   2615 }
   2616 
   2617 static void
   2618 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2619 {
   2620 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2621 	struct vmcs *vmcs = cpudata->vmcs;
   2622 	struct msr_entry *gmsr = cpudata->gmsr;
   2623 	extern uint8_t vmx_resume_rip;
   2624 	uint64_t rev, eptp;
   2625 
   2626 	rev = vmx_get_revision();
   2627 
   2628 	memset(vmcs, 0, VMCS_SIZE);
   2629 	vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
   2630 	vmcs->abort = 0;
   2631 
   2632 	vmx_vmcs_enter(vcpu);
   2633 
   2634 	/* No link pointer. */
   2635 	vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
   2636 
   2637 	/* Install the CTLSs. */
   2638 	vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
   2639 	vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
   2640 	vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
   2641 	vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
   2642 	vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
   2643 
   2644 	/* Allow direct access to certain MSRs. */
   2645 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   2646 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
   2647 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   2648 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   2649 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   2650 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   2651 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   2652 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   2653 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   2654 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   2655 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   2656 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   2657 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   2658 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_IA32_ARCH_CAPABILITIES,
   2659 	    true, false);
   2660 	vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
   2661 
   2662 	/*
   2663 	 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
   2664 	 * includes the L1D_FLUSH MSR, to mitigate L1TF.
   2665 	 */
   2666 	gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
   2667 	gmsr[VMX_MSRLIST_STAR].val = 0;
   2668 	gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
   2669 	gmsr[VMX_MSRLIST_LSTAR].val = 0;
   2670 	gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
   2671 	gmsr[VMX_MSRLIST_CSTAR].val = 0;
   2672 	gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
   2673 	gmsr[VMX_MSRLIST_SFMASK].val = 0;
   2674 	gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
   2675 	gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
   2676 	gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
   2677 	gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
   2678 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
   2679 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
   2680 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
   2681 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
   2682 
   2683 	/* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
   2684 	vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD|CR0_ET);
   2685 	vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
   2686 
   2687 	/* Force CR4_VMXE to zero. */
   2688 	vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
   2689 
   2690 	/* Set the Host state for resuming. */
   2691 	vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
   2692 	vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
   2693 	vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2694 	vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2695 	vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2696 	vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
   2697 	vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
   2698 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
   2699 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
   2700 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
   2701 	vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)idt);
   2702 	vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
   2703 	vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
   2704 	vmx_vmwrite(VMCS_HOST_CR0, rcr0() & ~CR0_TS);
   2705 
   2706 	/* Generate ASID. */
   2707 	vmx_asid_alloc(vcpu);
   2708 
    2709 	/* Enable EPT (Extended Page Tables), with a 4-level page walk. */
   2710 	eptp =
   2711 	    __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
   2712 	    __SHIFTIN(4-1, EPTP_WALKLEN) |
   2713 	    (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
   2714 	    mach->vm->vm_map.pmap->pm_pdirpa[0];
   2715 	vmx_vmwrite(VMCS_EPTP, eptp);
   2716 
   2717 	/* Init IA32_MISC_ENABLE. */
   2718 	cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
   2719 	cpudata->gmsr_misc_enable &=
   2720 	    ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
   2721 	cpudata->gmsr_misc_enable |=
   2722 	    (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
   2723 
   2724 	/* Init XSAVE header. */
   2725 	cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2726 	cpudata->gfpu.xsh_xcomp_bv = 0;
   2727 
   2728 	/* These MSRs are static. */
   2729 	cpudata->star = rdmsr(MSR_STAR);
   2730 	cpudata->lstar = rdmsr(MSR_LSTAR);
   2731 	cpudata->cstar = rdmsr(MSR_CSTAR);
   2732 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   2733 
   2734 	/* Install the RESET state. */
   2735 	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
   2736 	    sizeof(nvmm_x86_reset_state));
   2737 	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
   2738 	vcpu->comm->state_cached = 0;
   2739 	vmx_vcpu_setstate(vcpu);
   2740 
   2741 	vmx_vmcs_leave(vcpu);
   2742 }
   2743 
   2744 static int
   2745 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2746 {
   2747 	struct vmx_cpudata *cpudata;
   2748 	int error;
   2749 
   2750 	/* Allocate the VMX cpudata. */
   2751 	cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
   2752 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   2753 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   2754 	vcpu->cpudata = cpudata;
   2755 
   2756 	/* VMCS */
   2757 	error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
   2758 	    VMCS_NPAGES);
   2759 	if (error)
   2760 		goto error;
   2761 
   2762 	/* MSR Bitmap */
   2763 	error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   2764 	    MSRBM_NPAGES);
   2765 	if (error)
   2766 		goto error;
   2767 
   2768 	/* Guest MSR List */
   2769 	error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
   2770 	if (error)
   2771 		goto error;
   2772 
   2773 	kcpuset_create(&cpudata->htlb_want_flush, true);
   2774 
   2775 	/* Init the VCPU info. */
   2776 	vmx_vcpu_init(mach, vcpu);
   2777 
   2778 	return 0;
   2779 
   2780 error:
   2781 	if (cpudata->vmcs_pa) {
   2782 		vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
   2783 		    VMCS_NPAGES);
   2784 	}
   2785 	if (cpudata->msrbm_pa) {
   2786 		vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   2787 		    MSRBM_NPAGES);
   2788 	}
   2789 	if (cpudata->gmsr_pa) {
   2790 		vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2791 	}
   2792 
    2793 	uvm_km_free(kernel_map, (vaddr_t)cpudata, roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2794 	return error;
   2795 }
   2796 
   2797 static void
   2798 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2799 {
   2800 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2801 
   2802 	vmx_vmcs_enter(vcpu);
   2803 	vmx_asid_free(vcpu);
   2804 	vmx_vmcs_destroy(vcpu);
   2805 
   2806 	kcpuset_destroy(cpudata->htlb_want_flush);
   2807 
   2808 	vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
   2809 	vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2810 	vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2811 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2812 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2813 }
   2814 
   2815 /* -------------------------------------------------------------------------- */
   2816 
   2817 static int
   2818 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
   2819 {
   2820 	struct nvmm_vcpu_conf_cpuid *cpuid = data;
   2821 	size_t i;
   2822 
   2823 	if (__predict_false(cpuid->mask && cpuid->exit)) {
   2824 		return EINVAL;
   2825 	}
   2826 	if (__predict_false(cpuid->mask &&
   2827 	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
   2828 	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
   2829 	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
   2830 	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
   2831 		return EINVAL;
   2832 	}
   2833 
   2834 	/* If unset, delete, to restore the default behavior. */
   2835 	if (!cpuid->mask && !cpuid->exit) {
   2836 		for (i = 0; i < VMX_NCPUIDS; i++) {
   2837 			if (!cpudata->cpuidpresent[i]) {
   2838 				continue;
   2839 			}
   2840 			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2841 				cpudata->cpuidpresent[i] = false;
   2842 			}
   2843 		}
   2844 		return 0;
   2845 	}
   2846 
   2847 	/* If already here, replace. */
   2848 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2849 		if (!cpudata->cpuidpresent[i]) {
   2850 			continue;
   2851 		}
   2852 		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2853 			memcpy(&cpudata->cpuid[i], cpuid,
   2854 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2855 			return 0;
   2856 		}
   2857 	}
   2858 
   2859 	/* Not here, insert. */
   2860 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2861 		if (!cpudata->cpuidpresent[i]) {
   2862 			cpudata->cpuidpresent[i] = true;
   2863 			memcpy(&cpudata->cpuid[i], cpuid,
   2864 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2865 			return 0;
   2866 		}
   2867 	}
   2868 
   2869 	return ENOBUFS;
   2870 }
   2871 
   2872 static int
   2873 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
   2874 {
   2875 	struct nvmm_vcpu_conf_tpr *tpr = data;
   2876 
   2877 	memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
   2878 	return 0;
   2879 }
   2880 
   2881 static int
   2882 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
   2883 {
   2884 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2885 
   2886 	switch (op) {
   2887 	case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
   2888 		return vmx_vcpu_configure_cpuid(cpudata, data);
   2889 	case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
   2890 		return vmx_vcpu_configure_tpr(cpudata, data);
   2891 	default:
   2892 		return EINVAL;
   2893 	}
   2894 }
   2895 
   2896 /* -------------------------------------------------------------------------- */
   2897 
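/*
 * Installed as the pm_tlb_flush hook, called by the pmap when the EPT
 * mappings change. Bumping the generation number makes each VCPU flush
 * the host TLB before its next VMENTRY, and the shootdown IPIs kick the
 * VCPUs out of guest mode so the flush takes effect promptly.
 */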
   2898 static void
   2899 vmx_tlb_flush(struct pmap *pm)
   2900 {
   2901 	struct nvmm_machine *mach = pm->pm_data;
   2902 	struct vmx_machdata *machdata = mach->machdata;
   2903 
   2904 	atomic_inc_64(&machdata->mach_htlb_gen);
   2905 
   2906 	/* Generates IPIs, which cause #VMEXITs. */
   2907 	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
   2908 }
   2909 
   2910 static void
   2911 vmx_machine_create(struct nvmm_machine *mach)
   2912 {
   2913 	struct pmap *pmap = mach->vm->vm_map.pmap;
   2914 	struct vmx_machdata *machdata;
   2915 
   2916 	/* Convert to EPT. */
   2917 	pmap_ept_transform(pmap);
   2918 
   2919 	/* Fill in pmap info. */
   2920 	pmap->pm_data = (void *)mach;
   2921 	pmap->pm_tlb_flush = vmx_tlb_flush;
   2922 
   2923 	machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
   2924 	mach->machdata = machdata;
   2925 
   2926 	/* Start with an hTLB flush everywhere. */
   2927 	machdata->mach_htlb_gen = 1;
   2928 }
   2929 
   2930 static void
   2931 vmx_machine_destroy(struct nvmm_machine *mach)
   2932 {
   2933 	struct vmx_machdata *machdata = mach->machdata;
   2934 
   2935 	kmem_free(machdata, sizeof(struct vmx_machdata));
   2936 }
   2937 
   2938 static int
   2939 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2940 {
   2941 	panic("%s: impossible", __func__);
   2942 }
   2943 
   2944 /* -------------------------------------------------------------------------- */
   2945 
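/*
 * In the VMX capability MSRs, the low 32 bits give the allowed-0
 * settings (if bit n is 1 there, control bit n must be 1) and the high
 * 32 bits give the allowed-1 settings (if bit 32+n is 0 there, control
 * bit n must be 0). The TRUE_CTLS variants, when available, relax the
 * default1 class of controls.
 */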
   2946 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
   2947 	((msrval & __BIT(32 + bitoff)) != 0)
   2948 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
   2949 	((msrval & __BIT(bitoff)) == 0)
   2950 
   2951 static int
   2952 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
   2953 {
   2954 	uint64_t basic, val, true_val;
   2955 	bool has_true;
   2956 	size_t i;
   2957 
   2958 	basic = rdmsr(MSR_IA32_VMX_BASIC);
   2959 	has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
   2960 
   2961 	val = rdmsr(msr_ctls);
   2962 	if (has_true) {
   2963 		true_val = rdmsr(msr_true_ctls);
   2964 	} else {
   2965 		true_val = val;
   2966 	}
   2967 
   2968 	for (i = 0; i < 32; i++) {
   2969 		if (!(set_one & __BIT(i))) {
   2970 			continue;
   2971 		}
   2972 		if (!CTLS_ONE_ALLOWED(true_val, i)) {
   2973 			return -1;
   2974 		}
   2975 	}
   2976 
   2977 	return 0;
   2978 }
   2979 
   2980 static int
   2981 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
   2982     uint64_t set_one, uint64_t set_zero, uint64_t *res)
   2983 {
   2984 	uint64_t basic, val, true_val;
   2985 	bool one_allowed, zero_allowed, has_true;
   2986 	size_t i;
   2987 
   2988 	basic = rdmsr(MSR_IA32_VMX_BASIC);
   2989 	has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
   2990 
   2991 	val = rdmsr(msr_ctls);
   2992 	if (has_true) {
   2993 		true_val = rdmsr(msr_true_ctls);
   2994 	} else {
   2995 		true_val = val;
   2996 	}
   2997 
   2998 	for (i = 0; i < 32; i++) {
   2999 		one_allowed = CTLS_ONE_ALLOWED(true_val, i);
   3000 		zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
   3001 
   3002 		if (zero_allowed && !one_allowed) {
   3003 			if (set_one & __BIT(i))
   3004 				return -1;
   3005 			*res &= ~__BIT(i);
   3006 		} else if (one_allowed && !zero_allowed) {
   3007 			if (set_zero & __BIT(i))
   3008 				return -1;
   3009 			*res |= __BIT(i);
   3010 		} else {
   3011 			if (set_zero & __BIT(i)) {
   3012 				*res &= ~__BIT(i);
   3013 			} else if (set_one & __BIT(i)) {
   3014 				*res |= __BIT(i);
   3015 			} else if (!has_true) {
   3016 				*res &= ~__BIT(i);
   3017 			} else if (CTLS_ZERO_ALLOWED(val, i)) {
   3018 				*res &= ~__BIT(i);
   3019 			} else if (CTLS_ONE_ALLOWED(val, i)) {
   3020 				*res |= __BIT(i);
   3021 			} else {
   3022 				return -1;
   3023 			}
   3024 		}
   3025 	}
   3026 
   3027 	return 0;
   3028 }
   3029 
   3030 static bool
   3031 vmx_ident(void)
   3032 {
   3033 	uint64_t msr;
   3034 	int ret;
   3035 
   3036 	if (!(cpu_feature[1] & CPUID2_VMX)) {
   3037 		return false;
   3038 	}
   3039 
   3040 	msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
   3041 	if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
   3042 		printf("NVMM: VMX disabled in BIOS\n");
   3043 		return false;
   3044 	}
   3045 	if ((msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
    3046 		printf("NVMM: VMX disabled in BIOS (outside SMX)\n");
   3047 		return false;
   3048 	}
   3049 
   3050 	msr = rdmsr(MSR_IA32_VMX_BASIC);
   3051 	if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
   3052 		printf("NVMM: I/O reporting not supported\n");
   3053 		return false;
   3054 	}
   3055 	if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
   3056 		printf("NVMM: WB memory not supported\n");
   3057 		return false;
   3058 	}
   3059 
   3060 	/* PG and PE are reported, even if Unrestricted Guests is supported. */
   3061 	vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
   3062 	vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
   3063 	ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
   3064 	if (ret == -1) {
   3065 		printf("NVMM: CR0 requirements not satisfied\n");
   3066 		return false;
   3067 	}
   3068 
   3069 	vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
   3070 	vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
   3071 	ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
   3072 	if (ret == -1) {
   3073 		printf("NVMM: CR4 requirements not satisfied\n");
   3074 		return false;
   3075 	}
   3076 
   3077 	/* Init the CTLSs right now, and check for errors. */
   3078 	ret = vmx_init_ctls(
   3079 	    MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
   3080 	    VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
   3081 	    &vmx_pinbased_ctls);
   3082 	if (ret == -1) {
   3083 		printf("NVMM: pin-based-ctls requirements not satisfied\n");
   3084 		return false;
   3085 	}
   3086 	ret = vmx_init_ctls(
   3087 	    MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
   3088 	    VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
   3089 	    &vmx_procbased_ctls);
   3090 	if (ret == -1) {
   3091 		printf("NVMM: proc-based-ctls requirements not satisfied\n");
   3092 		return false;
   3093 	}
   3094 	ret = vmx_init_ctls(
   3095 	    MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
   3096 	    VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
   3097 	    &vmx_procbased_ctls2);
   3098 	if (ret == -1) {
   3099 		printf("NVMM: proc-based-ctls2 requirements not satisfied\n");
   3100 		return false;
   3101 	}
   3102 	ret = vmx_check_ctls(
   3103 	    MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
   3104 	    PROC_CTLS2_INVPCID_ENABLE);
    3105 	if (ret == 0) {
   3106 		vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
   3107 	}
   3108 	ret = vmx_init_ctls(
   3109 	    MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
   3110 	    VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
   3111 	    &vmx_entry_ctls);
   3112 	if (ret == -1) {
   3113 		printf("NVMM: entry-ctls requirements not satisfied\n");
   3114 		return false;
   3115 	}
   3116 	ret = vmx_init_ctls(
   3117 	    MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
   3118 	    VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
   3119 	    &vmx_exit_ctls);
   3120 	if (ret == -1) {
   3121 		printf("NVMM: exit-ctls requirements not satisfied\n");
   3122 		return false;
   3123 	}
   3124 
   3125 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   3126 	if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
   3127 		printf("NVMM: 4-level page tree not supported\n");
   3128 		return false;
   3129 	}
   3130 	if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
   3131 		printf("NVMM: INVEPT not supported\n");
   3132 		return false;
   3133 	}
   3134 	if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
   3135 		printf("NVMM: INVVPID not supported\n");
   3136 		return false;
   3137 	}
   3138 	if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
   3139 		pmap_ept_has_ad = true;
   3140 	} else {
   3141 		pmap_ept_has_ad = false;
   3142 	}
   3143 	if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
   3144 		printf("NVMM: EPT UC/WB memory types not supported\n");
   3145 		return false;
   3146 	}
   3147 
   3148 	return true;
   3149 }
   3150 
   3151 static void
   3152 vmx_init_asid(uint32_t maxasid)
   3153 {
   3154 	size_t allocsz;
   3155 
   3156 	mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
   3157 
   3158 	vmx_maxasid = maxasid;
   3159 	allocsz = roundup(maxasid, 8) / 8;
   3160 	vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   3161 
   3162 	/* ASID 0 is reserved for the host. */
   3163 	vmx_asidmap[0] |= __BIT(0);
   3164 }
   3165 
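/*
 * Cross-call body: enable or disable VMX on the local CPU, toggling
 * CR4.VMXE and executing VMXON/VMXOFF in the appropriate order.
 */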
   3166 static void
   3167 vmx_change_cpu(void *arg1, void *arg2)
   3168 {
   3169 	struct cpu_info *ci = curcpu();
   3170 	bool enable = arg1 != NULL;
   3171 	uint64_t cr4;
   3172 
   3173 	if (!enable) {
   3174 		vmx_vmxoff();
   3175 	}
   3176 
   3177 	cr4 = rcr4();
   3178 	if (enable) {
   3179 		cr4 |= CR4_VMXE;
   3180 	} else {
   3181 		cr4 &= ~CR4_VMXE;
   3182 	}
   3183 	lcr4(cr4);
   3184 
   3185 	if (enable) {
   3186 		vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
   3187 	}
   3188 }
   3189 
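/*
 * L1TF mitigation. If the CPU advertises that an L1D flush on VMENTRY
 * is unnecessary, do nothing. Otherwise, if the IA32_FLUSH_CMD MSR
 * exists, extend the entry MSR-load list to include it, so the hardware
 * flushes the L1D cache on each VMENTRY.
 */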
   3190 static void
   3191 vmx_init_l1tf(void)
   3192 {
   3193 	u_int descs[4];
   3194 	uint64_t msr;
   3195 
   3196 	if (cpuid_level < 7) {
   3197 		return;
   3198 	}
   3199 
   3200 	x86_cpuid(7, descs);
   3201 
   3202 	if (descs[3] & CPUID_SEF_ARCH_CAP) {
   3203 		msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
   3204 		if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
   3205 			/* No mitigation needed. */
   3206 			return;
   3207 		}
   3208 	}
   3209 
   3210 	if (descs[3] & CPUID_SEF_L1D_FLUSH) {
   3211 		/* Enable hardware mitigation. */
   3212 		vmx_msrlist_entry_nmsr += 1;
   3213 	}
   3214 }
   3215 
   3216 static void
   3217 vmx_init(void)
   3218 {
   3219 	CPU_INFO_ITERATOR cii;
   3220 	struct cpu_info *ci;
   3221 	uint64_t xc, msr;
   3222 	struct vmxon *vmxon;
   3223 	uint32_t revision;
   3224 	paddr_t pa;
   3225 	vaddr_t va;
   3226 	int error;
   3227 
   3228 	/* Init the ASID bitmap (VPID). */
   3229 	vmx_init_asid(VPID_MAX);
   3230 
   3231 	/* Init the XCR0 mask. */
   3232 	vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
   3233 
   3234 	/* Init the TLB flush op, the EPT flush op and the EPTP type. */
   3235 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   3236 	if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
   3237 		vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
   3238 	} else {
   3239 		vmx_tlb_flush_op = VMX_INVVPID_ALL;
   3240 	}
   3241 	if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
   3242 		vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
   3243 	} else {
   3244 		vmx_ept_flush_op = VMX_INVEPT_ALL;
   3245 	}
   3246 	if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
   3247 		vmx_eptp_type = EPTP_TYPE_WB;
   3248 	} else {
   3249 		vmx_eptp_type = EPTP_TYPE_UC;
   3250 	}
   3251 
   3252 	/* Init the L1TF mitigation. */
   3253 	vmx_init_l1tf();
   3254 
   3255 	memset(vmxoncpu, 0, sizeof(vmxoncpu));
   3256 	revision = vmx_get_revision();
   3257 
   3258 	for (CPU_INFO_FOREACH(cii, ci)) {
   3259 		error = vmx_memalloc(&pa, &va, 1);
   3260 		if (error) {
   3261 			panic("%s: out of memory", __func__);
   3262 		}
   3263 		vmxoncpu[cpu_index(ci)].pa = pa;
   3264 		vmxoncpu[cpu_index(ci)].va = va;
   3265 
   3266 		vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
   3267 		vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
   3268 	}
   3269 
   3270 	xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
   3271 	xc_wait(xc);
   3272 }
   3273 
   3274 static void
   3275 vmx_fini_asid(void)
   3276 {
   3277 	size_t allocsz;
   3278 
   3279 	allocsz = roundup(vmx_maxasid, 8) / 8;
   3280 	kmem_free(vmx_asidmap, allocsz);
   3281 
   3282 	mutex_destroy(&vmx_asidlock);
   3283 }
   3284 
   3285 static void
   3286 vmx_fini(void)
   3287 {
   3288 	uint64_t xc;
   3289 	size_t i;
   3290 
   3291 	xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
   3292 	xc_wait(xc);
   3293 
   3294 	for (i = 0; i < MAXCPUS; i++) {
   3295 		if (vmxoncpu[i].pa != 0)
   3296 			vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
   3297 	}
   3298 
   3299 	vmx_fini_asid();
   3300 }
   3301 
   3302 static void
   3303 vmx_capability(struct nvmm_capability *cap)
   3304 {
   3305 	cap->arch.mach_conf_support = 0;
   3306 	cap->arch.vcpu_conf_support =
   3307 	    NVMM_CAP_ARCH_VCPU_CONF_CPUID |
   3308 	    NVMM_CAP_ARCH_VCPU_CONF_TPR;
   3309 	cap->arch.xcr0_mask = vmx_xcr0_mask;
   3310 	cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
   3311 	cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
   3312 }
   3313 
   3314 const struct nvmm_impl nvmm_x86_vmx = {
   3315 	.ident = vmx_ident,
   3316 	.init = vmx_init,
   3317 	.fini = vmx_fini,
   3318 	.capability = vmx_capability,
   3319 	.mach_conf_max = NVMM_X86_MACH_NCONF,
   3320 	.mach_conf_sizes = NULL,
   3321 	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
   3322 	.vcpu_conf_sizes = vmx_vcpu_conf_sizes,
   3323 	.state_size = sizeof(struct nvmm_x64_state),
   3324 	.machine_create = vmx_machine_create,
   3325 	.machine_destroy = vmx_machine_destroy,
   3326 	.machine_configure = vmx_machine_configure,
   3327 	.vcpu_create = vmx_vcpu_create,
   3328 	.vcpu_destroy = vmx_vcpu_destroy,
   3329 	.vcpu_configure = vmx_vcpu_configure,
   3330 	.vcpu_setstate = vmx_vcpu_setstate,
   3331 	.vcpu_getstate = vmx_vcpu_getstate,
   3332 	.vcpu_inject = vmx_vcpu_inject,
   3333 	.vcpu_run = vmx_vcpu_run
   3334 };
   3335