/*	$NetBSD: nvmm_x86_vmx.c,v 1.37 2019/09/13 14:19:13 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.37 2019/09/13 14:19:13 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}
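
/*
 * The inline wrappers below rely on the VMX error-reporting convention:
 * a VMX instruction that fails with no current VMCS sets CF
 * (VMfailInvalid), and one that fails with a valid current VMCS sets ZF
 * (VMfailValid), in which case VMCS_INSTRUCTION_ERROR holds the error
 * number.  The jz/jc targets vmx_insn_failvalid and vmx_insn_failinvalid
 * are global labels, presumably panic stubs living next to the assembly
 * entry points declared above.
 */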

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	asm volatile (
		"invept		%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	asm volatile (
		"invvpid	%[desc],%[op];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	asm volatile (
		"vmread		%[field],%[value];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	asm volatile (
		"vmwrite	%[value],%[field];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

static inline paddr_t
vmx_vmptrst(void)
{
	paddr_t pa;

	asm volatile (
		"vmptrst	%[pa];"
		:
		: [pa] "m" (*(paddr_t *)&pa)
		: "memory"
	);

	return pa;
}

static inline void
vmx_vmptrld(paddr_t *pa)
{
	asm volatile (
		"vmptrld	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	asm volatile (
		"vmclear	%[pa];"
		"jz		vmx_insn_failvalid;"
		"jc		vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)
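
/*
 * VMXON outside of SMX operation requires the firmware to have set both
 * IA32_FEATURE_CONTROL_LOCK and IA32_FEATURE_CONTROL_OUT_SMX above.  If
 * the MSR is locked with OUT_SMX clear, VMX is disabled by firmware.
 */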

#define MSR_IA32_VMX_BASIC		0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS		0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
#define MSR_IA32_VMX_EXIT_CTLS			0x0483
#define MSR_IA32_VMX_ENTRY_CTLS			0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0			0x0486
#define MSR_IA32_VMX_CR0_FIXED1			0x0487
#define MSR_IA32_VMX_CR4_FIXED0			0x0488
#define MSR_IA32_VMX_CR4_FIXED1			0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */
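
/*
 * VMCS field encodings, per the Intel SDM: bit 0 is the access type
 * (0 = full, 1 = high half of a 64-bit field), bits 9:1 are the index,
 * bits 11:10 the type (0 = control, 1 = read-only data, 2 = guest state,
 * 3 = host state), and bits 14:13 the width (0 = 16-bit, 1 = 64-bit,
 * 2 = 32-bit, 3 = natural).  For example, VMCS_GUEST_CS_SELECTOR =
 * 0x0802 decodes as width 0 (16-bit), type 2 (guest state), index 1.
 */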

/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR		0x00002030
#define VMCS_TSC_MULTIPLIER			0x00002032
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INST_LENGTH			0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define		PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
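/* Exit reasons 35, 38 and 42 are reserved, hence the gaps above. */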

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);
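/* VPID 0 is reserved (it tags VMX root operation), hence the -1 above. */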

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0
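
/*
 * The *_ONE sets above are control bits we require to be 1, the *_ZERO
 * sets bits we require to be 0.  They get validated against the
 * capability MSRs, whose low 32 bits give the allowed-0 settings (a 1
 * there means the bit is fixed to 1) and whose high 32 bits give the
 * allowed-1 settings (a 0 there means the bit is fixed to 0).  A minimal
 * sketch of the check, assuming a CPU with TRUE_CTLS support:
 *
 *	uint64_t cap = rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
 *	uint32_t fixed1 = (uint32_t)cap;         (must-be-one bits)
 *	uint32_t allow1 = (uint32_t)(cap >> 32); (may-be-one bits)
 *	bool ok = ((VMX_PINBASED_CTLS_ONE & ~(uint64_t)allow1) == 0) &&
 *	    ((VMX_PINBASED_CTLS_ZERO & fixed1) == 0);
 */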

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)
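
/*
 * Toggling any of the bits above changes how virtual addresses are
 * translated (paging mode, global pages, caching), so a guest write
 * that flips one of them must trigger a guest TLB flush.
 */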

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_mach_conf_x86_cpuid cpuid[VMX_NCPUIDS];
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID)] =
	    sizeof(struct nvmm_mach_conf_x86_cpuid)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	struct cpu_info *vmcs_ci;
	bool vmcs_launched;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(64);

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmclear_ipi(void *arg1, void *arg2)
{
	paddr_t vmcs_pa = (paddr_t)arg1;
	vmx_vmclear(&vmcs_pa);
}
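
/*
 * A VMCS can only be VMCLEARed on the CPU where it is currently active,
 * so when it last ran remotely we send a high-priority cross-call there
 * and wait.  Waiting may sleep, which is forbidden with preemption
 * disabled; the LWP is therefore bound to its CPU and preemption is
 * briefly re-enabled around the wait.
 */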

static void
vmx_vmclear_remote(struct cpu_info *ci, paddr_t vmcs_pa)
{
	uint64_t xc;
	int bound;

	KASSERT(kpreempt_disabled());

	bound = curlwp_bind();
	kpreempt_enable();

	xc = xc_unicast(XC_HIGHPRI, vmx_vmclear_ipi, (void *)vmcs_pa, NULL, ci);
	xc_wait(xc);

	kpreempt_disable();
	curlwp_bindx(bound);
}
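
/*
 * vmx_vmcs_enter/leave bracket all accesses to a VCPU's VMCS and may
 * nest, hence the refcount.  On the outermost enter, the VMCS is made
 * current with VMPTRLD; if it was last active on another CPU it gets
 * VMCLEARed there first, which also means the next run must use
 * VMLAUNCH rather than VMRESUME (vmcs_launched is reset accordingly).
 */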

static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct cpu_info *vmcs_ci;
	paddr_t oldpa __diagused;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
#ifdef DIAGNOSTIC
		KASSERT(kpreempt_disabled());
		oldpa = vmx_vmptrst();
		KASSERT(oldpa == cpudata->vmcs_pa);
#endif
		return;
	}

	vmcs_ci = cpudata->vmcs_ci;
	cpudata->vmcs_ci = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	kpreempt_disable();

	if (vmcs_ci == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_ci != curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_ci, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
#ifdef DIAGNOSTIC
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
#endif
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_ci = curcpu();
	kpreempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	KASSERT(kpreempt_disabled());
#ifdef DIAGNOSTIC
	KASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
#endif
	KASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline int
vmx_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	enum nvmm_event_type evtype;
	uint64_t info, vector, error;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.error;
	__insn_barrier();

	if (__predict_false(vector >= 256)) {
		return EINVAL;
	}

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	case NVMM_EVENT_EXCEPTION:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		type = INTR_TYPE_HW_EXC;
		err = vmx_event_has_error(vector);
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN(vector, INTR_INFO_VECTOR) |
	    __SHIFTIN(type, INTR_INFO_TYPE) |
	    __SHIFTIN(err, INTR_INFO_ERROR) |
	    __SHIFTIN(1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}
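
/*
 * Example of the VM-entry interruption info encoded above: injecting
 * #GP (vector 13, hardware exception, error code valid) gives
 *
 *	__SHIFTIN(13, INTR_INFO_VECTOR) |
 *	__SHIFTIN(INTR_TYPE_HW_EXC, INTR_INFO_TYPE) |
 *	__SHIFTIN(1, INTR_INFO_ERROR) |
 *	__SHIFTIN(1, INTR_INFO_VALID) = 0x80000B0D
 */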

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_EVENT_EXCEPTION;
	comm->event.vector = 6;
	comm->event.u.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_EVENT_EXCEPTION;
	comm->event.vector = 13;
	comm->event.u.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_EXIT_INVALID;
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

static void
vmx_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;

	switch (eax) {
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000005:
	case 0x00000006:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
		break;
	case 0x0000000D:
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_PES1_XSAVES;
			break;
		}
		break;
	case 0x40000000:
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	default:
		break;
	}
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_machdata *machdata = mach->machdata;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_mach_conf_x86_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	vmx_inkernel_handle_cpuid(vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}
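
/*
 * Note: on typical hardware the CR0 fixed MSRs report PE|NE|PG as
 * must-be-one; with PROC_CTLS2_UNRESTRICTED_GUEST enabled the CPU
 * relaxes PE and PG regardless, effectively leaving NE as the only
 * fixed-to-1 bit, which is why the CR0 handler below forces CR0_NE.
 */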

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr0 = gpr | CR0_NE | CR0_ET;
	cr0 &= ~(CR0_NW|CR0_CD);

	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (cr0 & CR0_PG) {
		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	vmx_vmwrite(VMCS_GUEST_CR0, cr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		gpr = vmx_vmread(VMCS_GUEST_RSP);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr4 = gpr | CR4_VMXE;

	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual;
	int ret;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(vcpu);
	}

	exit->reason = NVMM_EXIT_NONE;
}

#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)
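
/*
 * Worked example of the decoding below: "out %al,$0xE9" exits with
 * qual = 0x00E90000 (port 0xE9 in bits 31:16, direction OUT, size
 * field 0 = 1 byte, no string or REP), and is reported to userland as
 * an NVMM_EXIT_IO_OUT of operand_size 1 on port 0xE9.
 */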

static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
	info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);

	exit->reason = NVMM_EXIT_IO;

	if (qual & VMX_QUAL_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if ((exit->u.io.type == NVMM_EXIT_IO_IN) && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.io.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static const uint64_t msr_ignore_list[] = {
	MSR_BIOS_SIGN,
	MSR_IA32_PLATFORM_ID
};

static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;
	size_t i;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_CR_PAT) {
			val = vmx_vmread(VMCS_GUEST_IA32_PAT);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
			val = cpudata->gmsr_misc_enable;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			val = 0;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_TSC) {
			cpudata->gtsc = exit->u.msr.val;
			cpudata->gtsc_want_update = true;
			goto handled;
		}
		if (exit->u.msr.msr == MSR_CR_PAT) {
			val = exit->u.msr.val;
			if (__predict_false(!nvmm_x86_pat_validate(val))) {
				goto error;
			}
			vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
			/* Don't care. */
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			goto handled;
		}
		break;
	}

	return false;

handled:
	vmx_inkernel_advance();
	return true;

error:
	vmx_inject_gp(vcpu);
	return true;
}

static void
vmx_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit, bool rdmsr)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t inslen, rip;

	if (rdmsr) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (rdmsr) {
		exit->u.msr.val = 0;
	} else {
		uint64_t rdx, rax;
		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->gprs[NVMM_X64_GPR_RAX];
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	}

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.msr.npc = rip + inslen;

	vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}
   1640 
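         /*
          * XSETBV loads XCR[%ecx] with %edx:%eax. Only XCR0 is defined, so
          * we inject #GP if %ecx is non-zero, if a bit outside the supported
          * XCR0 mask is requested, or if the mandatory X87 bit is clear.
          * The new XCR0 takes effect via wrxcr() at the next FPU enter.
          */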
   1641 static void
   1642 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1643     struct nvmm_exit *exit)
   1644 {
   1645 	struct vmx_cpudata *cpudata = vcpu->cpudata;
    1646 	uint64_t val;
   1647 
   1648 	exit->reason = NVMM_EXIT_NONE;
   1649 
   1650 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
   1651 	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
   1652 
   1653 	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
   1654 		goto error;
   1655 	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
   1656 		goto error;
   1657 	} else if (__predict_false((val & XCR0_X87) == 0)) {
   1658 		goto error;
   1659 	}
   1660 
   1661 	cpudata->gxcr0 = val;
   1662 
   1663 	vmx_inkernel_advance();
   1664 	return;
   1665 
   1666 error:
   1667 	vmx_inject_gp(vcpu);
   1668 }
   1669 
   1670 #define VMX_EPT_VIOLATION_READ		__BIT(0)
   1671 #define VMX_EPT_VIOLATION_WRITE		__BIT(1)
   1672 #define VMX_EPT_VIOLATION_EXECUTE	__BIT(2)
   1673 
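         /*
          * EPT violation. Bits 0-2 of the exit qualification encode the
          * access type that faulted; reduce them to a single NVMM
          * protection, giving priority to write, then execute, then read.
          */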
   1674 static void
   1675 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1676     struct nvmm_exit *exit)
   1677 {
   1678 	uint64_t perm;
   1679 	gpaddr_t gpa;
   1680 
   1681 	gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
   1682 
   1683 	exit->reason = NVMM_EXIT_MEMORY;
   1684 	perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
   1685 	if (perm & VMX_EPT_VIOLATION_WRITE)
   1686 		exit->u.mem.prot = PROT_WRITE;
   1687 	else if (perm & VMX_EPT_VIOLATION_EXECUTE)
   1688 		exit->u.mem.prot = PROT_EXEC;
   1689 	else
   1690 		exit->u.mem.prot = PROT_READ;
   1691 	exit->u.mem.gpa = gpa;
   1692 	exit->u.mem.inst_len = 0;
   1693 
   1694 	vmx_vcpu_state_provide(vcpu,
   1695 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1696 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1697 }
   1698 
   1699 /* -------------------------------------------------------------------------- */
   1700 
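         /*
          * Swap the FPU context around guest execution: save the host state
          * and install the guest state for the whole XCR0 mask, swapping
          * XCR0 itself when XSAVE is available. CR0.TS is remembered on
          * entry and re-set on leave, so the host's lazy-FPU bookkeeping
          * is preserved.
          */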
   1701 static void
   1702 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
   1703 {
   1704 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1705 
   1706 	cpudata->ts_set = (rcr0() & CR0_TS) != 0;
   1707 
   1708 	fpu_area_save(&cpudata->hfpu, vmx_xcr0_mask);
   1709 	fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
   1710 
   1711 	if (vmx_xcr0_mask != 0) {
   1712 		cpudata->hxcr0 = rdxcr(0);
   1713 		wrxcr(0, cpudata->gxcr0);
   1714 	}
   1715 }
   1716 
   1717 static void
   1718 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
   1719 {
   1720 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1721 
   1722 	if (vmx_xcr0_mask != 0) {
   1723 		cpudata->gxcr0 = rdxcr(0);
   1724 		wrxcr(0, cpudata->hxcr0);
   1725 	}
   1726 
   1727 	fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
   1728 	fpu_area_restore(&cpudata->hfpu, vmx_xcr0_mask);
   1729 
   1730 	if (cpudata->ts_set) {
   1731 		stts();
   1732 	}
   1733 }
   1734 
   1735 static void
   1736 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
   1737 {
   1738 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1739 
   1740 	x86_dbregs_save(curlwp);
   1741 
   1742 	ldr7(0);
   1743 
   1744 	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
   1745 	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
   1746 	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
   1747 	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
   1748 	ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
   1749 }
   1750 
   1751 static void
   1752 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
   1753 {
   1754 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1755 
   1756 	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
   1757 	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
   1758 	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
   1759 	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
   1760 	cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
   1761 
   1762 	x86_dbregs_restore(curlwp);
   1763 }
   1764 
   1765 static void
   1766 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
   1767 {
   1768 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1769 
   1770 	/* This gets restored automatically by the CPU. */
   1771 	vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
   1772 	vmx_vmwrite(VMCS_HOST_CR3, rcr3());
   1773 	vmx_vmwrite(VMCS_HOST_CR4, rcr4());
   1774 
   1775 	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
   1776 }
   1777 
   1778 static void
   1779 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
   1780 {
   1781 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1782 
   1783 	wrmsr(MSR_STAR, cpudata->star);
   1784 	wrmsr(MSR_LSTAR, cpudata->lstar);
   1785 	wrmsr(MSR_CSTAR, cpudata->cstar);
   1786 	wrmsr(MSR_SFMASK, cpudata->sfmask);
   1787 	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
   1788 }
   1789 
   1790 /* -------------------------------------------------------------------------- */
   1791 
   1792 #define VMX_INVVPID_ADDRESS		0
   1793 #define VMX_INVVPID_CONTEXT		1
   1794 #define VMX_INVVPID_ALL			2
   1795 #define VMX_INVVPID_CONTEXT_NOGLOBAL	3
   1796 
   1797 #define VMX_INVEPT_CONTEXT		1
   1798 #define VMX_INVEPT_ALL			2
   1799 
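         /*
          * TLB maintenance. Two domains are tracked separately:
          *
          *  - The "guest TLB" holds the VPID-tagged guest translations. When
          *    a VCPU migrates to another host CPU, stale entries may remain
          *    there, so vmx_gtlb_catchup schedules an INVVPID.
          *
          *  - The "host TLB" holds the EPT (GPA->HPA) translations. When the
          *    host modifies the EPT, vmx_tlb_flush bumps mach_htlb_gen; each
          *    VCPU that sees a newer generation marks every running CPU in
          *    htlb_want_flush, performs INVEPT locally (vmx_htlb_flush), and
          *    acks for the current CPU (vmx_htlb_flush_ack). The remaining
          *    CPUs are flushed by vmx_htlb_catchup when the VCPU runs there.
          */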
   1800 static inline void
   1801 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1802 {
   1803 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1804 
   1805 	if (vcpu->hcpu_last != hcpu) {
   1806 		cpudata->gtlb_want_flush = true;
   1807 	}
   1808 }
   1809 
   1810 static inline void
   1811 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1812 {
   1813 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1814 	struct ept_desc ept_desc;
   1815 
   1816 	if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
   1817 		return;
   1818 	}
   1819 
   1820 	ept_desc.eptp = vmx_vmread(VMCS_EPTP);
   1821 	ept_desc.mbz = 0;
   1822 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1823 	kcpuset_clear(cpudata->htlb_want_flush, hcpu);
   1824 }
   1825 
   1826 static inline uint64_t
   1827 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
   1828 {
   1829 	struct ept_desc ept_desc;
   1830 	uint64_t machgen;
   1831 
   1832 	machgen = machdata->mach_htlb_gen;
   1833 	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
   1834 		return machgen;
   1835 	}
   1836 
   1837 	kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
   1838 
   1839 	ept_desc.eptp = vmx_vmread(VMCS_EPTP);
   1840 	ept_desc.mbz = 0;
   1841 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1842 
   1843 	return machgen;
   1844 }
   1845 
   1846 static inline void
   1847 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
   1848 {
   1849 	cpudata->vcpu_htlb_gen = machgen;
   1850 	kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
   1851 }
   1852 
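         /*
          * If the VMEXIT interrupted the delivery of an event, the VMCS
          * IDT-vectoring fields describe it. Copy them into the VM-entry
          * interruption fields so the event is re-injected at the next
          * VMENTRY, and remember that an event is pending.
          */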
   1853 static inline void
   1854 vmx_exit_evt(struct vmx_cpudata *cpudata)
   1855 {
   1856 	uint64_t info, err;
   1857 
   1858 	cpudata->evt_pending = false;
   1859 
   1860 	info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
   1861 	if (__predict_true((info & INTR_INFO_VALID) == 0)) {
   1862 		return;
   1863 	}
   1864 	err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
   1865 
   1866 	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
   1867 	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
   1868 
   1869 	cpudata->evt_pending = true;
   1870 }
   1871 
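         /*
          * Run the VCPU: commit the pending event and dirty state from the
          * comm page, refresh the per-CPU host fields of the VMCS if we
          * migrated, then enter the guest in a loop at splhigh() with the
          * guest FPU, %cr2 and TLB state installed. Each VMEXIT is
          * dispatched on its exit reason; we keep re-entering until the
          * exit must be serviced by userland or the host needs the CPU
          * back (preemption, softints, userret work).
          */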
   1872 static int
   1873 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1874     struct nvmm_exit *exit)
   1875 {
   1876 	struct nvmm_comm_page *comm = vcpu->comm;
   1877 	struct vmx_machdata *machdata = mach->machdata;
   1878 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1879 	struct vpid_desc vpid_desc;
   1880 	struct cpu_info *ci;
   1881 	uint64_t exitcode;
   1882 	uint64_t intstate;
   1883 	uint64_t machgen;
   1884 	int hcpu, s, ret;
   1885 	bool launched;
   1886 
   1887 	vmx_vmcs_enter(vcpu);
   1888 
   1889 	if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
   1890 		vmx_vmcs_leave(vcpu);
   1891 		return EINVAL;
   1892 	}
   1893 	vmx_vcpu_state_commit(vcpu);
   1894 	comm->state_cached = 0;
   1895 
   1896 	ci = curcpu();
   1897 	hcpu = cpu_number();
   1898 	launched = cpudata->vmcs_launched;
   1899 
   1900 	vmx_gtlb_catchup(vcpu, hcpu);
   1901 	vmx_htlb_catchup(vcpu, hcpu);
   1902 
   1903 	if (vcpu->hcpu_last != hcpu) {
   1904 		vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
   1905 		vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
   1906 		vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
   1907 		vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
   1908 		cpudata->gtsc_want_update = true;
   1909 		vcpu->hcpu_last = hcpu;
   1910 	}
   1911 
   1912 	vmx_vcpu_guest_dbregs_enter(vcpu);
   1913 	vmx_vcpu_guest_misc_enter(vcpu);
   1914 
   1915 	while (1) {
   1916 		if (cpudata->gtlb_want_flush) {
   1917 			vpid_desc.vpid = cpudata->asid;
   1918 			vpid_desc.addr = 0;
   1919 			vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
   1920 			cpudata->gtlb_want_flush = false;
   1921 		}
   1922 
   1923 		if (__predict_false(cpudata->gtsc_want_update)) {
   1924 			vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
   1925 			cpudata->gtsc_want_update = false;
   1926 		}
   1927 
   1928 		s = splhigh();
   1929 		machgen = vmx_htlb_flush(machdata, cpudata);
   1930 		vmx_vcpu_guest_fpu_enter(vcpu);
   1931 		lcr2(cpudata->gcr2);
   1932 		if (launched) {
   1933 			ret = vmx_vmresume(cpudata->gprs);
   1934 		} else {
   1935 			ret = vmx_vmlaunch(cpudata->gprs);
   1936 		}
   1937 		cpudata->gcr2 = rcr2();
   1938 		vmx_vcpu_guest_fpu_leave(vcpu);
   1939 		vmx_htlb_flush_ack(cpudata, machgen);
   1940 		splx(s);
   1941 
   1942 		if (__predict_false(ret != 0)) {
   1943 			vmx_exit_invalid(exit, -1);
   1944 			break;
   1945 		}
   1946 		vmx_exit_evt(cpudata);
   1947 
   1948 		launched = true;
   1949 
   1950 		exitcode = vmx_vmread(VMCS_EXIT_REASON);
   1951 		exitcode &= __BITS(15,0);
   1952 
   1953 		switch (exitcode) {
   1954 		case VMCS_EXITCODE_EXC_NMI:
   1955 			vmx_exit_exc_nmi(mach, vcpu, exit);
   1956 			break;
   1957 		case VMCS_EXITCODE_EXT_INT:
   1958 			exit->reason = NVMM_EXIT_NONE;
   1959 			break;
   1960 		case VMCS_EXITCODE_CPUID:
   1961 			vmx_exit_cpuid(mach, vcpu, exit);
   1962 			break;
   1963 		case VMCS_EXITCODE_HLT:
   1964 			vmx_exit_hlt(mach, vcpu, exit);
   1965 			break;
   1966 		case VMCS_EXITCODE_CR:
   1967 			vmx_exit_cr(mach, vcpu, exit);
   1968 			break;
   1969 		case VMCS_EXITCODE_IO:
   1970 			vmx_exit_io(mach, vcpu, exit);
   1971 			break;
   1972 		case VMCS_EXITCODE_RDMSR:
   1973 			vmx_exit_msr(mach, vcpu, exit, true);
   1974 			break;
   1975 		case VMCS_EXITCODE_WRMSR:
   1976 			vmx_exit_msr(mach, vcpu, exit, false);
   1977 			break;
   1978 		case VMCS_EXITCODE_SHUTDOWN:
   1979 			exit->reason = NVMM_EXIT_SHUTDOWN;
   1980 			break;
   1981 		case VMCS_EXITCODE_MONITOR:
   1982 			exit->reason = NVMM_EXIT_MONITOR;
   1983 			break;
   1984 		case VMCS_EXITCODE_MWAIT:
   1985 			exit->reason = NVMM_EXIT_MWAIT;
   1986 			break;
   1987 		case VMCS_EXITCODE_XSETBV:
   1988 			vmx_exit_xsetbv(mach, vcpu, exit);
   1989 			break;
   1990 		case VMCS_EXITCODE_RDPMC:
   1991 		case VMCS_EXITCODE_RDTSCP:
   1992 		case VMCS_EXITCODE_INVVPID:
   1993 		case VMCS_EXITCODE_INVEPT:
   1994 		case VMCS_EXITCODE_VMCALL:
   1995 		case VMCS_EXITCODE_VMCLEAR:
   1996 		case VMCS_EXITCODE_VMLAUNCH:
   1997 		case VMCS_EXITCODE_VMPTRLD:
   1998 		case VMCS_EXITCODE_VMPTRST:
   1999 		case VMCS_EXITCODE_VMREAD:
   2000 		case VMCS_EXITCODE_VMRESUME:
   2001 		case VMCS_EXITCODE_VMWRITE:
   2002 		case VMCS_EXITCODE_VMXOFF:
   2003 		case VMCS_EXITCODE_VMXON:
   2004 			vmx_inject_ud(vcpu);
   2005 			exit->reason = NVMM_EXIT_NONE;
   2006 			break;
   2007 		case VMCS_EXITCODE_EPT_VIOLATION:
   2008 			vmx_exit_epf(mach, vcpu, exit);
   2009 			break;
   2010 		case VMCS_EXITCODE_INT_WINDOW:
   2011 			vmx_event_waitexit_disable(vcpu, false);
   2012 			exit->reason = NVMM_EXIT_INT_READY;
   2013 			break;
   2014 		case VMCS_EXITCODE_NMI_WINDOW:
   2015 			vmx_event_waitexit_disable(vcpu, true);
   2016 			exit->reason = NVMM_EXIT_NMI_READY;
   2017 			break;
   2018 		default:
   2019 			vmx_exit_invalid(exit, exitcode);
   2020 			break;
   2021 		}
   2022 
   2023 		/* If no reason to return to userland, keep rolling. */
   2024 		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
   2025 			break;
   2026 		}
   2027 		if (curcpu()->ci_data.cpu_softints != 0) {
   2028 			break;
   2029 		}
   2030 		if (curlwp->l_flag & LW_USERRET) {
   2031 			break;
   2032 		}
   2033 		if (exit->reason != NVMM_EXIT_NONE) {
   2034 			break;
   2035 		}
   2036 	}
   2037 
   2038 	cpudata->vmcs_launched = launched;
   2039 
   2040 	cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
   2041 
   2042 	vmx_vcpu_guest_misc_leave(vcpu);
   2043 	vmx_vcpu_guest_dbregs_leave(vcpu);
   2044 
   2045 	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = cpudata->gcr8;
   2046 	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] =
   2047 	    vmx_vmread(VMCS_GUEST_RFLAGS);
   2048 	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2049 	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
   2050 	    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   2051 	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
   2052 	    cpudata->int_window_exit;
   2053 	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
   2054 	    cpudata->nmi_window_exit;
   2055 	exit->exitstate[NVMM_X64_EXITSTATE_EVT_PENDING] =
   2056 	    cpudata->evt_pending;
   2057 
   2058 	vmx_vmcs_leave(vcpu);
   2059 
   2060 	return 0;
   2061 }
   2062 
   2063 /* -------------------------------------------------------------------------- */
   2064 
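         /*
          * Allocate npages of contiguous, wired physical memory, mapped
          * into kernel VA and zeroed. The VMX structures (VMXON region,
          * VMCS, MSR bitmap, guest MSR list) are handed to the CPU by
          * physical address, so they must stay resident and contiguous.
          */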
   2065 static int
   2066 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
   2067 {
   2068 	struct pglist pglist;
   2069 	paddr_t _pa;
   2070 	vaddr_t _va;
   2071 	size_t i;
   2072 	int ret;
   2073 
   2074 	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
   2075 	    &pglist, 1, 0);
   2076 	if (ret != 0)
   2077 		return ENOMEM;
   2078 	_pa = TAILQ_FIRST(&pglist)->phys_addr;
   2079 	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
   2080 	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
   2081 	if (_va == 0)
   2082 		goto error;
   2083 
   2084 	for (i = 0; i < npages; i++) {
   2085 		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
   2086 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
   2087 	}
   2088 	pmap_update(pmap_kernel());
   2089 
   2090 	memset((void *)_va, 0, npages * PAGE_SIZE);
   2091 
   2092 	*pa = _pa;
   2093 	*va = _va;
   2094 	return 0;
   2095 
   2096 error:
   2097 	for (i = 0; i < npages; i++) {
   2098 		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
   2099 	}
   2100 	return ENOMEM;
   2101 }
   2102 
   2103 static void
   2104 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
   2105 {
   2106 	size_t i;
   2107 
   2108 	pmap_kremove(va, npages * PAGE_SIZE);
   2109 	pmap_update(pmap_kernel());
   2110 	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
   2111 	for (i = 0; i < npages; i++) {
   2112 		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
   2113 	}
   2114 }
   2115 
   2116 /* -------------------------------------------------------------------------- */
   2117 
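         /*
          * Clear the bits of the given MSR in the 4KB MSR bitmap, so that
          * guest accesses don't cause a VMEXIT. Per the layout in the Intel
          * SDM: bytes 0-1023 are the read bitmap for MSRs 0x0-0x1FFF, bytes
          * 1024-2047 the read bitmap for 0xC0000000-0xC0001FFF, and bytes
          * 2048-4095 the corresponding write bitmaps.
          */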
   2118 static void
   2119 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
   2120 {
   2121 	uint64_t byte;
   2122 	uint8_t bitoff;
   2123 
   2124 	if (msr < 0x00002000) {
   2125 		/* Range 1 */
   2126 		byte = ((msr - 0x00000000) / 8) + 0;
   2127 	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
   2128 		/* Range 2 */
   2129 		byte = ((msr - 0xC0000000) / 8) + 1024;
   2130 	} else {
   2131 		panic("%s: wrong range", __func__);
   2132 	}
   2133 
   2134 	bitoff = (msr & 0x7);
   2135 
   2136 	if (read) {
   2137 		bitmap[byte] &= ~__BIT(bitoff);
   2138 	}
   2139 	if (write) {
   2140 		bitmap[2048 + byte] &= ~__BIT(bitoff);
   2141 	}
   2142 }
   2143 
   2144 #define VMX_SEG_ATTRIB_TYPE		__BITS(3,0)
   2145 #define VMX_SEG_ATTRIB_S		__BIT(4)
   2146 #define VMX_SEG_ATTRIB_DPL		__BITS(6,5)
   2147 #define VMX_SEG_ATTRIB_P		__BIT(7)
   2148 #define VMX_SEG_ATTRIB_AVL		__BIT(12)
   2149 #define VMX_SEG_ATTRIB_L		__BIT(13)
   2150 #define VMX_SEG_ATTRIB_DEF		__BIT(14)
   2151 #define VMX_SEG_ATTRIB_G		__BIT(15)
   2152 #define VMX_SEG_ATTRIB_UNUSABLE		__BIT(16)
   2153 
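         /*
          * Convert the NVMM segment attributes to the VMX access-rights
          * format, marking a segment with P=0 as unusable. GDT and IDT
          * have no selector or attributes in the VMCS, only base and limit.
          */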
   2154 static void
   2155 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
   2156 {
   2157 	uint64_t attrib;
   2158 
   2159 	attrib =
   2160 	    __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
   2161 	    __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
   2162 	    __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
   2163 	    __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
   2164 	    __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
   2165 	    __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
   2166 	    __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
   2167 	    __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
   2168 	    (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
   2169 
   2170 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   2171 		vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
   2172 		vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
   2173 	}
   2174 	vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
   2175 	vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
   2176 }
   2177 
   2178 static void
   2179 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
   2180 {
   2181 	uint64_t selector = 0, attrib = 0, base, limit;
   2182 
   2183 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   2184 		selector = vmx_vmread(vmx_guest_segs[idx].selector);
   2185 		attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
   2186 	}
   2187 	limit = vmx_vmread(vmx_guest_segs[idx].limit);
   2188 	base = vmx_vmread(vmx_guest_segs[idx].base);
   2189 
   2190 	segs[idx].selector = selector;
   2191 	segs[idx].limit = limit;
   2192 	segs[idx].base = base;
   2193 	segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
   2194 	segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
   2195 	segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
   2196 	segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
   2197 	segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
   2198 	segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
   2199 	segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
   2200 	segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
   2201 	if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
   2202 		segs[idx].attrib.p = 0;
   2203 	}
   2204 }
   2205 
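         /*
          * Determine whether the state being set can change the guest
          * address translations, in which case the guest TLB must be
          * flushed: the paging-related bits of CR0/CR4/EFER, and any
          * CR3 change.
          */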
   2206 static inline bool
   2207 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
   2208 {
   2209 	uint64_t cr0, cr3, cr4, efer;
   2210 
   2211 	if (flags & NVMM_X64_STATE_CRS) {
   2212 		cr0 = vmx_vmread(VMCS_GUEST_CR0);
   2213 		if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
   2214 			return true;
   2215 		}
   2216 		cr3 = vmx_vmread(VMCS_GUEST_CR3);
   2217 		if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
   2218 			return true;
   2219 		}
   2220 		cr4 = vmx_vmread(VMCS_GUEST_CR4);
   2221 		if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
   2222 			return true;
   2223 		}
   2224 	}
   2225 
   2226 	if (flags & NVMM_X64_STATE_MSRS) {
   2227 		efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
   2228 		if ((efer ^
   2229 		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
   2230 			return true;
   2231 		}
   2232 	}
   2233 
   2234 	return false;
   2235 }
   2236 
   2237 static void
   2238 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
   2239 {
   2240 	struct nvmm_comm_page *comm = vcpu->comm;
   2241 	const struct nvmm_x64_state *state = &comm->state;
   2242 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2243 	struct fxsave *fpustate;
   2244 	uint64_t ctls1, intstate;
   2245 	uint64_t flags;
   2246 
   2247 	flags = comm->state_wanted;
   2248 
   2249 	vmx_vmcs_enter(vcpu);
   2250 
   2251 	if (vmx_state_tlb_flush(state, flags)) {
   2252 		cpudata->gtlb_want_flush = true;
   2253 	}
   2254 
   2255 	if (flags & NVMM_X64_STATE_SEGS) {
   2256 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
   2257 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
   2258 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
   2259 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
   2260 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
   2261 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
   2262 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2263 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2264 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2265 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
   2266 	}
   2267 
   2268 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2269 	if (flags & NVMM_X64_STATE_GPRS) {
   2270 		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
   2271 
   2272 		vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
   2273 		vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
   2274 		vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
   2275 	}
   2276 
   2277 	if (flags & NVMM_X64_STATE_CRS) {
   2278 		/*
   2279 		 * CR0_NE and CR4_VMXE are mandatory.
   2280 		 */
   2281 		vmx_vmwrite(VMCS_GUEST_CR0,
   2282 		    state->crs[NVMM_X64_CR_CR0] | CR0_NE);
   2283 		cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
   2284 		vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); // XXX PDPTE?
   2285 		vmx_vmwrite(VMCS_GUEST_CR4,
   2286 		    state->crs[NVMM_X64_CR_CR4] | CR4_VMXE);
   2287 		cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
   2288 
   2289 		if (vmx_xcr0_mask != 0) {
   2290 			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
   2291 			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
   2292 			cpudata->gxcr0 &= vmx_xcr0_mask;
   2293 			cpudata->gxcr0 |= XCR0_X87;
   2294 		}
   2295 	}
   2296 
   2297 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2298 	if (flags & NVMM_X64_STATE_DRS) {
   2299 		memcpy(cpudata->drs, state->drs, sizeof(state->drs));
   2300 
   2301 		cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
   2302 		vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
   2303 	}
   2304 
   2305 	if (flags & NVMM_X64_STATE_MSRS) {
   2306 		cpudata->gmsr[VMX_MSRLIST_STAR].val =
   2307 		    state->msrs[NVMM_X64_MSR_STAR];
   2308 		cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
   2309 		    state->msrs[NVMM_X64_MSR_LSTAR];
   2310 		cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
   2311 		    state->msrs[NVMM_X64_MSR_CSTAR];
   2312 		cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
   2313 		    state->msrs[NVMM_X64_MSR_SFMASK];
   2314 		cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
   2315 		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
   2316 
   2317 		vmx_vmwrite(VMCS_GUEST_IA32_EFER,
   2318 		    state->msrs[NVMM_X64_MSR_EFER]);
   2319 		vmx_vmwrite(VMCS_GUEST_IA32_PAT,
   2320 		    state->msrs[NVMM_X64_MSR_PAT]);
   2321 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
   2322 		    state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
   2323 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
   2324 		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
   2325 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
   2326 		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
   2327 
   2328 		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
   2329 		cpudata->gtsc_want_update = true;
   2330 
   2331 		/* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
   2332 		ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
   2333 		if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
   2334 			ctls1 |= ENTRY_CTLS_LONG_MODE;
   2335 		} else {
   2336 			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
   2337 		}
   2338 		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
   2339 	}
   2340 
   2341 	if (flags & NVMM_X64_STATE_INTR) {
   2342 		intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2343 		intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
   2344 		if (state->intr.int_shadow) {
   2345 			intstate |= INT_STATE_MOVSS;
   2346 		}
   2347 		vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
   2348 
   2349 		if (state->intr.int_window_exiting) {
   2350 			vmx_event_waitexit_enable(vcpu, false);
   2351 		} else {
   2352 			vmx_event_waitexit_disable(vcpu, false);
   2353 		}
   2354 
   2355 		if (state->intr.nmi_window_exiting) {
   2356 			vmx_event_waitexit_enable(vcpu, true);
   2357 		} else {
   2358 			vmx_event_waitexit_disable(vcpu, true);
   2359 		}
   2360 	}
   2361 
   2362 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2363 	if (flags & NVMM_X64_STATE_FPU) {
   2364 		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
   2365 		    sizeof(state->fpu));
   2366 
   2367 		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
   2368 		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
   2369 		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
   2370 
   2371 		if (vmx_xcr0_mask != 0) {
   2372 			/* Reset XSTATE_BV, to force a reload. */
   2373 			cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2374 		}
   2375 	}
   2376 
   2377 	vmx_vmcs_leave(vcpu);
   2378 
   2379 	comm->state_wanted = 0;
   2380 	comm->state_cached |= flags;
   2381 }
   2382 
   2383 static void
   2384 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
   2385 {
   2386 	struct nvmm_comm_page *comm = vcpu->comm;
   2387 	struct nvmm_x64_state *state = &comm->state;
   2388 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2389 	uint64_t intstate, flags;
   2390 
   2391 	flags = comm->state_wanted;
   2392 
   2393 	vmx_vmcs_enter(vcpu);
   2394 
   2395 	if (flags & NVMM_X64_STATE_SEGS) {
   2396 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
   2397 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
   2398 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
   2399 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
   2400 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
   2401 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
   2402 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2403 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2404 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2405 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
   2406 	}
   2407 
   2408 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2409 	if (flags & NVMM_X64_STATE_GPRS) {
   2410 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   2411 
   2412 		state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
   2413 		state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
   2414 		state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
   2415 	}
   2416 
   2417 	if (flags & NVMM_X64_STATE_CRS) {
   2418 		state->crs[NVMM_X64_CR_CR0] = vmx_vmread(VMCS_GUEST_CR0);
   2419 		state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
   2420 		state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
   2421 		state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
   2422 		state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
   2423 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   2424 
   2425 		/* Hide VMXE. */
   2426 		state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
   2427 	}
   2428 
   2429 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2430 	if (flags & NVMM_X64_STATE_DRS) {
   2431 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   2432 
   2433 		state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
   2434 	}
   2435 
   2436 	if (flags & NVMM_X64_STATE_MSRS) {
   2437 		state->msrs[NVMM_X64_MSR_STAR] =
   2438 		    cpudata->gmsr[VMX_MSRLIST_STAR].val;
   2439 		state->msrs[NVMM_X64_MSR_LSTAR] =
   2440 		    cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
   2441 		state->msrs[NVMM_X64_MSR_CSTAR] =
   2442 		    cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
   2443 		state->msrs[NVMM_X64_MSR_SFMASK] =
   2444 		    cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
   2445 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   2446 		    cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
   2447 		state->msrs[NVMM_X64_MSR_EFER] =
   2448 		    vmx_vmread(VMCS_GUEST_IA32_EFER);
   2449 		state->msrs[NVMM_X64_MSR_PAT] =
   2450 		    vmx_vmread(VMCS_GUEST_IA32_PAT);
   2451 		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
   2452 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
   2453 		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
   2454 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
   2455 		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
   2456 		    vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
   2457 		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
   2458 	}
   2459 
   2460 	if (flags & NVMM_X64_STATE_INTR) {
   2461 		intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
   2462 		state->intr.int_shadow =
   2463 		    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   2464 		state->intr.int_window_exiting = cpudata->int_window_exit;
   2465 		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
   2466 		state->intr.evt_pending = cpudata->evt_pending;
   2467 	}
   2468 
   2469 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2470 	if (flags & NVMM_X64_STATE_FPU) {
   2471 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   2472 		    sizeof(state->fpu));
   2473 	}
   2474 
   2475 	vmx_vmcs_leave(vcpu);
   2476 
   2477 	comm->state_wanted = 0;
   2478 	comm->state_cached |= flags;
   2479 }
   2480 
   2481 static void
   2482 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
   2483 {
   2484 	vcpu->comm->state_wanted = flags;
   2485 	vmx_vcpu_getstate(vcpu);
   2486 }
   2487 
   2488 static void
   2489 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
   2490 {
   2491 	vcpu->comm->state_wanted = vcpu->comm->state_commit;
   2492 	vcpu->comm->state_commit = 0;
   2493 	vmx_vcpu_setstate(vcpu);
   2494 }
   2495 
   2496 /* -------------------------------------------------------------------------- */
   2497 
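         /*
          * Allocate a VPID from the global bitmap, under vmx_asidlock, and
          * install it in the VMCS. The VPID tags the guest's TLB entries,
          * allowing them to coexist with the host's. Panics if the whole
          * VPID space is in use.
          */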
   2498 static void
   2499 vmx_asid_alloc(struct nvmm_cpu *vcpu)
   2500 {
   2501 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2502 	size_t i, oct, bit;
   2503 
   2504 	mutex_enter(&vmx_asidlock);
   2505 
   2506 	for (i = 0; i < vmx_maxasid; i++) {
   2507 		oct = i / 8;
   2508 		bit = i % 8;
   2509 
   2510 		if (vmx_asidmap[oct] & __BIT(bit)) {
   2511 			continue;
   2512 		}
   2513 
   2514 		cpudata->asid = i;
   2515 
   2516 		vmx_asidmap[oct] |= __BIT(bit);
   2517 		vmx_vmwrite(VMCS_VPID, i);
   2518 		mutex_exit(&vmx_asidlock);
   2519 		return;
   2520 	}
   2521 
   2522 	mutex_exit(&vmx_asidlock);
   2523 
   2524 	panic("%s: impossible", __func__);
   2525 }
   2526 
   2527 static void
   2528 vmx_asid_free(struct nvmm_cpu *vcpu)
   2529 {
   2530 	size_t oct, bit;
   2531 	uint64_t asid;
   2532 
   2533 	asid = vmx_vmread(VMCS_VPID);
   2534 
   2535 	oct = asid / 8;
   2536 	bit = asid % 8;
   2537 
   2538 	mutex_enter(&vmx_asidlock);
   2539 	vmx_asidmap[oct] &= ~__BIT(bit);
   2540 	mutex_exit(&vmx_asidlock);
   2541 }
   2542 
   2543 static void
   2544 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2545 {
   2546 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2547 	struct vmcs *vmcs = cpudata->vmcs;
   2548 	struct msr_entry *gmsr = cpudata->gmsr;
   2549 	extern uint8_t vmx_resume_rip;
   2550 	uint64_t rev, eptp;
   2551 
   2552 	rev = vmx_get_revision();
   2553 
   2554 	memset(vmcs, 0, VMCS_SIZE);
   2555 	vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
   2556 	vmcs->abort = 0;
   2557 
   2558 	vmx_vmcs_enter(vcpu);
   2559 
   2560 	/* No link pointer. */
   2561 	vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
   2562 
   2563 	/* Install the CTLSs. */
   2564 	vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
   2565 	vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
   2566 	vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
   2567 	vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
   2568 	vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
   2569 
   2570 	/* Allow direct access to certain MSRs. */
   2571 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   2572 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
   2573 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   2574 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   2575 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   2576 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   2577 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   2578 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   2579 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   2580 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   2581 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   2582 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   2583 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   2584 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_IA32_ARCH_CAPABILITIES,
   2585 	    true, false);
   2586 	vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
   2587 
   2588 	/*
   2589 	 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
   2590 	 * includes the L1D_FLUSH MSR, to mitigate L1TF.
   2591 	 */
   2592 	gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
   2593 	gmsr[VMX_MSRLIST_STAR].val = 0;
   2594 	gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
   2595 	gmsr[VMX_MSRLIST_LSTAR].val = 0;
   2596 	gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
   2597 	gmsr[VMX_MSRLIST_CSTAR].val = 0;
   2598 	gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
   2599 	gmsr[VMX_MSRLIST_SFMASK].val = 0;
   2600 	gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
   2601 	gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
   2602 	gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
   2603 	gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
   2604 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
   2605 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
   2606 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
   2607 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
   2608 
   2609 	/* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
   2610 	vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD|CR0_ET);
   2611 	vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
   2612 
   2613 	/* Force CR4_VMXE to zero. */
   2614 	vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
   2615 
   2616 	/* Set the Host state for resuming. */
   2617 	vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
   2618 	vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
   2619 	vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2620 	vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2621 	vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2622 	vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
   2623 	vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
   2624 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
   2625 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
   2626 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
   2627 	vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)idt);
   2628 	vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
   2629 	vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
   2630 	vmx_vmwrite(VMCS_HOST_CR0, rcr0());
   2631 
   2632 	/* Generate ASID. */
   2633 	vmx_asid_alloc(vcpu);
   2634 
   2635 	/* Enable Extended Paging, 4-Level. */
   2636 	eptp =
   2637 	    __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
   2638 	    __SHIFTIN(4-1, EPTP_WALKLEN) |
   2639 	    (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
   2640 	    mach->vm->vm_map.pmap->pm_pdirpa[0];
   2641 	vmx_vmwrite(VMCS_EPTP, eptp);
   2642 
   2643 	/* Init IA32_MISC_ENABLE. */
   2644 	cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
   2645 	cpudata->gmsr_misc_enable &=
   2646 	    ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
   2647 	cpudata->gmsr_misc_enable |=
   2648 	    (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
   2649 
   2650 	/* Init XSAVE header. */
   2651 	cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2652 	cpudata->gfpu.xsh_xcomp_bv = 0;
   2653 
   2654 	/* These MSRs are static. */
   2655 	cpudata->star = rdmsr(MSR_STAR);
   2656 	cpudata->lstar = rdmsr(MSR_LSTAR);
   2657 	cpudata->cstar = rdmsr(MSR_CSTAR);
   2658 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   2659 
   2660 	/* Install the RESET state. */
   2661 	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
   2662 	    sizeof(nvmm_x86_reset_state));
   2663 	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
   2664 	vcpu->comm->state_cached = 0;
   2665 	vmx_vcpu_setstate(vcpu);
   2666 
   2667 	vmx_vmcs_leave(vcpu);
   2668 }
   2669 
   2670 static int
   2671 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2672 {
   2673 	struct vmx_cpudata *cpudata;
   2674 	int error;
   2675 
   2676 	/* Allocate the VMX cpudata. */
   2677 	cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
   2678 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   2679 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   2680 	vcpu->cpudata = cpudata;
   2681 
   2682 	/* VMCS */
   2683 	error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
   2684 	    VMCS_NPAGES);
   2685 	if (error)
   2686 		goto error;
   2687 
   2688 	/* MSR Bitmap */
   2689 	error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   2690 	    MSRBM_NPAGES);
   2691 	if (error)
   2692 		goto error;
   2693 
   2694 	/* Guest MSR List */
   2695 	error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
   2696 	if (error)
   2697 		goto error;
   2698 
   2699 	kcpuset_create(&cpudata->htlb_want_flush, true);
   2700 
   2701 	/* Init the VCPU info. */
   2702 	vmx_vcpu_init(mach, vcpu);
   2703 
   2704 	return 0;
   2705 
   2706 error:
   2707 	if (cpudata->vmcs_pa) {
   2708 		vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
   2709 		    VMCS_NPAGES);
   2710 	}
   2711 	if (cpudata->msrbm_pa) {
   2712 		vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   2713 		    MSRBM_NPAGES);
   2714 	}
   2715 	if (cpudata->gmsr_pa) {
   2716 		vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2717 	}
   2718 
    2719 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
         	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2720 	return error;
   2721 }
   2722 
   2723 static void
   2724 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2725 {
   2726 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2727 
   2728 	vmx_vmcs_enter(vcpu);
   2729 	vmx_asid_free(vcpu);
   2730 	vmx_vmcs_destroy(vcpu);
   2731 
   2732 	kcpuset_destroy(cpudata->htlb_want_flush);
   2733 
   2734 	vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
   2735 	vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2736 	vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2737 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2738 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2739 }
   2740 
   2741 /* -------------------------------------------------------------------------- */
   2742 
   2743 static void
   2744 vmx_tlb_flush(struct pmap *pm)
   2745 {
   2746 	struct nvmm_machine *mach = pm->pm_data;
   2747 	struct vmx_machdata *machdata = mach->machdata;
   2748 
   2749 	atomic_inc_64(&machdata->mach_htlb_gen);
   2750 
   2751 	/* Generates IPIs, which cause #VMEXITs. */
   2752 	pmap_tlb_shootdown(pmap_kernel(), -1, PG_G, TLBSHOOT_UPDATE);
   2753 }
   2754 
   2755 static void
   2756 vmx_machine_create(struct nvmm_machine *mach)
   2757 {
   2758 	struct pmap *pmap = mach->vm->vm_map.pmap;
   2759 	struct vmx_machdata *machdata;
   2760 
   2761 	/* Convert to EPT. */
   2762 	pmap_ept_transform(pmap);
   2763 
   2764 	/* Fill in pmap info. */
   2765 	pmap->pm_data = (void *)mach;
   2766 	pmap->pm_tlb_flush = vmx_tlb_flush;
   2767 
   2768 	machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
   2769 	mach->machdata = machdata;
   2770 
   2771 	/* Start with an hTLB flush everywhere. */
   2772 	machdata->mach_htlb_gen = 1;
   2773 }
   2774 
   2775 static void
   2776 vmx_machine_destroy(struct nvmm_machine *mach)
   2777 {
   2778 	struct vmx_machdata *machdata = mach->machdata;
   2779 
   2780 	kmem_free(machdata, sizeof(struct vmx_machdata));
   2781 }
   2782 
   2783 static int
   2784 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2785 {
   2786 	struct nvmm_mach_conf_x86_cpuid *cpuid = data;
   2787 	struct vmx_machdata *machdata = (struct vmx_machdata *)mach->machdata;
   2788 	size_t i;
   2789 
   2790 	if (__predict_false(op != NVMM_MACH_CONF_MD(NVMM_MACH_CONF_X86_CPUID))) {
   2791 		return EINVAL;
   2792 	}
   2793 
   2794 	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
   2795 	    (cpuid->set.ebx & cpuid->del.ebx) ||
   2796 	    (cpuid->set.ecx & cpuid->del.ecx) ||
   2797 	    (cpuid->set.edx & cpuid->del.edx))) {
   2798 		return EINVAL;
   2799 	}
   2800 
   2801 	/* If already here, replace. */
   2802 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2803 		if (!machdata->cpuidpresent[i]) {
   2804 			continue;
   2805 		}
   2806 		if (machdata->cpuid[i].leaf == cpuid->leaf) {
   2807 			memcpy(&machdata->cpuid[i], cpuid,
   2808 			    sizeof(struct nvmm_mach_conf_x86_cpuid));
   2809 			return 0;
   2810 		}
   2811 	}
   2812 
   2813 	/* Not here, insert. */
   2814 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2815 		if (!machdata->cpuidpresent[i]) {
   2816 			machdata->cpuidpresent[i] = true;
   2817 			memcpy(&machdata->cpuid[i], cpuid,
   2818 			    sizeof(struct nvmm_mach_conf_x86_cpuid));
   2819 			return 0;
   2820 		}
   2821 	}
   2822 
   2823 	return ENOBUFS;
   2824 }
   2825 
   2826 /* -------------------------------------------------------------------------- */
   2827 
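         /*
          * Compute a VMX control field. In the capability MSR, bits 31:0
          * give the allowed 0-settings (if bit N is set, control bit N is
          * forced to 1) and bits 63:32 the allowed 1-settings (if bit
          * 32+N is clear, control bit N is forced to 0). The TRUE variant
          * of the MSR, when available, relaxes some default-one bits. Fail
          * if a bit we require in set_one/set_zero cannot be satisfied.
          */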
   2828 static int
   2829 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
   2830     uint64_t set_one, uint64_t set_zero, uint64_t *res)
   2831 {
   2832 	uint64_t basic, val, true_val;
   2833 	bool one_allowed, zero_allowed, has_true;
   2834 	size_t i;
   2835 
   2836 	basic = rdmsr(MSR_IA32_VMX_BASIC);
   2837 	has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
   2838 
   2839 	val = rdmsr(msr_ctls);
   2840 	if (has_true) {
   2841 		true_val = rdmsr(msr_true_ctls);
   2842 	} else {
   2843 		true_val = val;
   2844 	}
   2845 
   2846 #define ONE_ALLOWED(msrval, bitoff) \
   2847 	((msrval & __BIT(32 + bitoff)) != 0)
   2848 #define ZERO_ALLOWED(msrval, bitoff) \
   2849 	((msrval & __BIT(bitoff)) == 0)
   2850 
   2851 	for (i = 0; i < 32; i++) {
   2852 		one_allowed = ONE_ALLOWED(true_val, i);
   2853 		zero_allowed = ZERO_ALLOWED(true_val, i);
   2854 
   2855 		if (zero_allowed && !one_allowed) {
   2856 			if (set_one & __BIT(i))
   2857 				return -1;
   2858 			*res &= ~__BIT(i);
   2859 		} else if (one_allowed && !zero_allowed) {
   2860 			if (set_zero & __BIT(i))
   2861 				return -1;
   2862 			*res |= __BIT(i);
   2863 		} else {
   2864 			if (set_zero & __BIT(i)) {
   2865 				*res &= ~__BIT(i);
   2866 			} else if (set_one & __BIT(i)) {
   2867 				*res |= __BIT(i);
   2868 			} else if (!has_true) {
   2869 				*res &= ~__BIT(i);
   2870 			} else if (ZERO_ALLOWED(val, i)) {
   2871 				*res &= ~__BIT(i);
   2872 			} else if (ONE_ALLOWED(val, i)) {
   2873 				*res |= __BIT(i);
   2874 			} else {
   2875 				return -1;
   2876 			}
   2877 		}
   2878 	}
   2879 
   2880 	return 0;
   2881 }
   2882 
   2883 static bool
   2884 vmx_ident(void)
   2885 {
   2886 	uint64_t msr;
   2887 	int ret;
   2888 
   2889 	if (!(cpu_feature[1] & CPUID2_VMX)) {
   2890 		return false;
   2891 	}
   2892 
   2893 	msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
   2894 	if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
   2895 		return false;
   2896 	}
   2897 	if ((msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
   2898 		return false;
   2899 	}
   2900 
   2901 	msr = rdmsr(MSR_IA32_VMX_BASIC);
   2902 	if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
   2903 		return false;
   2904 	}
   2905 	if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
   2906 		return false;
   2907 	}
   2908 
   2909 	/* PG and PE are reported, even if Unrestricted Guests is supported. */
   2910 	vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
   2911 	vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
   2912 	ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
   2913 	if (ret == -1) {
   2914 		return false;
   2915 	}
   2916 
   2917 	vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
   2918 	vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
   2919 	ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
   2920 	if (ret == -1) {
   2921 		return false;
   2922 	}
   2923 
   2924 	/* Init the CTLSs right now, and check for errors. */
   2925 	ret = vmx_init_ctls(
   2926 	    MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
   2927 	    VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
   2928 	    &vmx_pinbased_ctls);
   2929 	if (ret == -1) {
   2930 		return false;
   2931 	}
   2932 	ret = vmx_init_ctls(
   2933 	    MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
   2934 	    VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
   2935 	    &vmx_procbased_ctls);
   2936 	if (ret == -1) {
   2937 		return false;
   2938 	}
   2939 	ret = vmx_init_ctls(
   2940 	    MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
   2941 	    VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
   2942 	    &vmx_procbased_ctls2);
   2943 	if (ret == -1) {
   2944 		return false;
   2945 	}
   2946 	ret = vmx_init_ctls(
   2947 	    MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
   2948 	    VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
   2949 	    &vmx_entry_ctls);
   2950 	if (ret == -1) {
   2951 		return false;
   2952 	}
   2953 	ret = vmx_init_ctls(
   2954 	    MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
   2955 	    VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
   2956 	    &vmx_exit_ctls);
   2957 	if (ret == -1) {
   2958 		return false;
   2959 	}
   2960 
   2961 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   2962 	if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
   2963 		return false;
   2964 	}
   2965 	if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
   2966 		return false;
   2967 	}
   2968 	if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
   2969 		return false;
   2970 	}
   2971 	if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
   2972 		pmap_ept_has_ad = true;
   2973 	} else {
   2974 		pmap_ept_has_ad = false;
   2975 	}
   2976 	if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
   2977 		return false;
   2978 	}
   2979 
   2980 	return true;
   2981 }
   2982 
   2983 static void
   2984 vmx_init_asid(uint32_t maxasid)
   2985 {
   2986 	size_t allocsz;
   2987 
   2988 	mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
   2989 
   2990 	vmx_maxasid = maxasid;
   2991 	allocsz = roundup(maxasid, 8) / 8;
   2992 	vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   2993 
   2994 	/* ASID 0 is reserved for the host. */
   2995 	vmx_asidmap[0] |= __BIT(0);
   2996 }
   2997 
   2998 static void
   2999 vmx_change_cpu(void *arg1, void *arg2)
   3000 {
   3001 	struct cpu_info *ci = curcpu();
   3002 	bool enable = (bool)arg1;
   3003 	uint64_t cr4;
   3004 
   3005 	if (!enable) {
   3006 		vmx_vmxoff();
   3007 	}
   3008 
   3009 	cr4 = rcr4();
   3010 	if (enable) {
   3011 		cr4 |= CR4_VMXE;
   3012 	} else {
   3013 		cr4 &= ~CR4_VMXE;
   3014 	}
   3015 	lcr4(cr4);
   3016 
   3017 	if (enable) {
   3018 		vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
   3019 	}
   3020 }
   3021 
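         /*
          * L1TF mitigation. If IA32_ARCH_CAPABILITIES reports that an L1D
          * flush is not needed on VMENTRY, do nothing. Otherwise, if the
          * CPU advertises IA32_FLUSH_CMD, extend the guest MSR-load list
          * to cover the L1DFLUSH entry, so the CPU flushes the L1D cache
          * at each VMENTRY.
          */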
   3022 static void
   3023 vmx_init_l1tf(void)
   3024 {
   3025 	u_int descs[4];
   3026 	uint64_t msr;
   3027 
   3028 	if (cpuid_level < 7) {
   3029 		return;
   3030 	}
   3031 
   3032 	x86_cpuid(7, descs);
   3033 
   3034 	if (descs[3] & CPUID_SEF_ARCH_CAP) {
   3035 		msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
   3036 		if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
   3037 			/* No mitigation needed. */
   3038 			return;
   3039 		}
   3040 	}
   3041 
   3042 	if (descs[3] & CPUID_SEF_L1D_FLUSH) {
   3043 		/* Enable hardware mitigation. */
   3044 		vmx_msrlist_entry_nmsr += 1;
   3045 	}
   3046 }
   3047 
   3048 static void
   3049 vmx_init(void)
   3050 {
   3051 	CPU_INFO_ITERATOR cii;
   3052 	struct cpu_info *ci;
   3053 	uint64_t xc, msr;
   3054 	struct vmxon *vmxon;
   3055 	uint32_t revision;
   3056 	paddr_t pa;
   3057 	vaddr_t va;
   3058 	int error;
   3059 
   3060 	/* Init the ASID bitmap (VPID). */
   3061 	vmx_init_asid(VPID_MAX);
   3062 
   3063 	/* Init the XCR0 mask. */
   3064 	vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
   3065 
   3066 	/* Init the TLB flush op, the EPT flush op and the EPTP type. */
   3067 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   3068 	if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
   3069 		vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
   3070 	} else {
   3071 		vmx_tlb_flush_op = VMX_INVVPID_ALL;
   3072 	}
   3073 	if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
   3074 		vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
   3075 	} else {
   3076 		vmx_ept_flush_op = VMX_INVEPT_ALL;
   3077 	}
   3078 	if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
   3079 		vmx_eptp_type = EPTP_TYPE_WB;
   3080 	} else {
   3081 		vmx_eptp_type = EPTP_TYPE_UC;
   3082 	}
   3083 
   3084 	/* Init the L1TF mitigation. */
   3085 	vmx_init_l1tf();
   3086 
   3087 	memset(vmxoncpu, 0, sizeof(vmxoncpu));
   3088 	revision = vmx_get_revision();
   3089 
   3090 	for (CPU_INFO_FOREACH(cii, ci)) {
   3091 		error = vmx_memalloc(&pa, &va, 1);
   3092 		if (error) {
   3093 			panic("%s: out of memory", __func__);
   3094 		}
   3095 		vmxoncpu[cpu_index(ci)].pa = pa;
   3096 		vmxoncpu[cpu_index(ci)].va = va;
   3097 
   3098 		vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
   3099 		vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
   3100 	}
   3101 
   3102 	xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
   3103 	xc_wait(xc);
   3104 }
   3105 
   3106 static void
   3107 vmx_fini_asid(void)
   3108 {
   3109 	size_t allocsz;
   3110 
   3111 	allocsz = roundup(vmx_maxasid, 8) / 8;
   3112 	kmem_free(vmx_asidmap, allocsz);
   3113 
   3114 	mutex_destroy(&vmx_asidlock);
   3115 }
   3116 
   3117 static void
   3118 vmx_fini(void)
   3119 {
   3120 	uint64_t xc;
   3121 	size_t i;
   3122 
   3123 	xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
   3124 	xc_wait(xc);
   3125 
   3126 	for (i = 0; i < MAXCPUS; i++) {
   3127 		if (vmxoncpu[i].pa != 0)
   3128 			vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
   3129 	}
   3130 
   3131 	vmx_fini_asid();
   3132 }
   3133 
   3134 static void
   3135 vmx_capability(struct nvmm_capability *cap)
   3136 {
   3137 	cap->arch.xcr0_mask = vmx_xcr0_mask;
   3138 	cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
   3139 	cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
   3140 }
   3141 
   3142 const struct nvmm_impl nvmm_x86_vmx = {
   3143 	.ident = vmx_ident,
   3144 	.init = vmx_init,
   3145 	.fini = vmx_fini,
   3146 	.capability = vmx_capability,
   3147 	.conf_max = NVMM_X86_NCONF,
   3148 	.conf_sizes = vmx_conf_sizes,
   3149 	.state_size = sizeof(struct nvmm_x64_state),
   3150 	.machine_create = vmx_machine_create,
   3151 	.machine_destroy = vmx_machine_destroy,
   3152 	.machine_configure = vmx_machine_configure,
   3153 	.vcpu_create = vmx_vcpu_create,
   3154 	.vcpu_destroy = vmx_vcpu_destroy,
   3155 	.vcpu_setstate = vmx_vcpu_setstate,
   3156 	.vcpu_getstate = vmx_vcpu_getstate,
   3157 	.vcpu_inject = vmx_vcpu_inject,
   3158 	.vcpu_run = vmx_vcpu_run
   3159 };
   3160