/*	$NetBSD: nvmm_x86_vmx.c,v 1.17 2019/03/07 15:06:37 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.17 2019/03/07 15:06:37 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int _vmx_vmxon(paddr_t *pa);
int _vmx_vmxoff(void);
int _vmx_invept(uint64_t op, void *desc);
int _vmx_invvpid(uint64_t op, void *desc);
int _vmx_vmread(uint64_t op, uint64_t *val);
int _vmx_vmwrite(uint64_t op, uint64_t val);
int _vmx_vmptrld(paddr_t *pa);
int _vmx_vmptrst(paddr_t *pa);
int _vmx_vmclear(paddr_t *pa);
int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);

#define vmx_vmxon(a) \
	if (__predict_false(_vmx_vmxon(a) != 0)) { \
		panic("%s: VMXON failed", __func__); \
	}
#define vmx_vmxoff() \
	if (__predict_false(_vmx_vmxoff() != 0)) { \
		panic("%s: VMXOFF failed", __func__); \
	}
#define vmx_invept(a, b) \
	if (__predict_false(_vmx_invept(a, b) != 0)) { \
		panic("%s: INVEPT failed", __func__); \
	}
#define vmx_invvpid(a, b) \
	if (__predict_false(_vmx_invvpid(a, b) != 0)) { \
		panic("%s: INVVPID failed", __func__); \
	}
#define vmx_vmread(a, b) \
	if (__predict_false(_vmx_vmread(a, b) != 0)) { \
		panic("%s: VMREAD failed", __func__); \
	}
#define vmx_vmwrite(a, b) \
	if (__predict_false(_vmx_vmwrite(a, b) != 0)) { \
		panic("%s: VMWRITE failed", __func__); \
	}
#define vmx_vmptrld(a) \
	if (__predict_false(_vmx_vmptrld(a) != 0)) { \
		panic("%s: VMPTRLD failed", __func__); \
	}
#define vmx_vmptrst(a) \
	if (__predict_false(_vmx_vmptrst(a) != 0)) { \
		panic("%s: VMPTRST failed", __func__); \
	}
#define vmx_vmclear(a) \
	if (__predict_false(_vmx_vmclear(a) != 0)) { \
		panic("%s: VMCLEAR failed", __func__); \
	}
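/*
 * Illustrative sketch, not part of the driver: the wrappers above turn
 * VMX instruction failures into panics, so callers never check a return
 * value. A hypothetical helper (compiled out) that advances the guest
 * RIP by one byte would read and rewrite a VMCS field like this;
 * VMCS_GUEST_RIP is defined further below.
 */
#if 0	/* example only */
static void
vmx_example_bump_rip(void)
{
	uint64_t rip;

	vmx_vmread(VMCS_GUEST_RIP, &rip);	/* panics if VMREAD fails */
	vmx_vmwrite(VMCS_GUEST_RIP, rip + 1);	/* panics if VMWRITE fails */
}
#endif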
#define MSR_IA32_FEATURE_CONTROL	0x003A
#define		IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define		IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define		IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC		0x0480
#define		IA32_VMX_BASIC_IDENT		__BITS(30,0)
#define		IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define		IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define		IA32_VMX_BASIC_DUAL		__BIT(49)
#define		IA32_VMX_BASIC_MEM_TYPE		__BITS(53,50)
#define			MEM_TYPE_UC		0
#define			MEM_TYPE_WB		6
#define		IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define		IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS		0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS		0x0482
#define MSR_IA32_VMX_EXIT_CTLS			0x0483
#define MSR_IA32_VMX_ENTRY_CTLS			0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2		0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x0490

#define MSR_IA32_VMX_CR0_FIXED0			0x0486
#define MSR_IA32_VMX_CR0_FIXED1			0x0487
#define MSR_IA32_VMX_CR4_FIXED0			0x0488
#define MSR_IA32_VMX_CR4_FIXED1			0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define		IA32_VMX_EPT_VPID_WALKLENGTH_4		__BIT(6)
#define		IA32_VMX_EPT_VPID_UC			__BIT(8)
#define		IA32_VMX_EPT_VPID_WB			__BIT(14)
#define		IA32_VMX_EPT_VPID_INVEPT		__BIT(20)
#define		IA32_VMX_EPT_VPID_FLAGS_AD		__BIT(21)
#define		IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define		IA32_VMX_EPT_VPID_INVEPT_ALL		__BIT(26)
#define		IA32_VMX_EPT_VPID_INVVPID		__BIT(32)
#define		IA32_VMX_EPT_VPID_INVVPID_ADDR		__BIT(40)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define		IA32_VMX_EPT_VPID_INVVPID_ALL		__BIT(42)
#define		IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

/* 16-bit control fields */
#define VMCS_VPID				0x00000000
#define VMCS_PIR_VECTOR				0x00000002
#define VMCS_EPTP_INDEX				0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR			0x00000800
#define VMCS_GUEST_CS_SELECTOR			0x00000802
#define VMCS_GUEST_SS_SELECTOR			0x00000804
#define VMCS_GUEST_DS_SELECTOR			0x00000806
#define VMCS_GUEST_FS_SELECTOR			0x00000808
#define VMCS_GUEST_GS_SELECTOR			0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR		0x0000080C
#define VMCS_GUEST_TR_SELECTOR			0x0000080E
#define VMCS_GUEST_INTR_STATUS			0x00000810
#define VMCS_PML_INDEX				0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR			0x00000C00
#define VMCS_HOST_CS_SELECTOR			0x00000C02
#define VMCS_HOST_SS_SELECTOR			0x00000C04
#define VMCS_HOST_DS_SELECTOR			0x00000C06
#define VMCS_HOST_FS_SELECTOR			0x00000C08
#define VMCS_HOST_GS_SELECTOR			0x00000C0A
#define VMCS_HOST_TR_SELECTOR			0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A			0x00002000
#define VMCS_IO_BITMAP_B			0x00002002
#define VMCS_MSR_BITMAP				0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS		0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS		0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS		0x0000200A
#define VMCS_EXECUTIVE_VMCS			0x0000200C
#define VMCS_PML_ADDRESS			0x0000200E
#define VMCS_TSC_OFFSET				0x00002010
#define VMCS_VIRTUAL_APIC			0x00002012
#define VMCS_APIC_ACCESS			0x00002014
#define VMCS_PIR_DESC				0x00002016
#define VMCS_VM_CONTROL				0x00002018
#define VMCS_EPTP				0x0000201A
#define		EPTP_TYPE			__BITS(2,0)
#define			EPTP_TYPE_UC		0
#define			EPTP_TYPE_WB		6
#define		EPTP_WALKLEN			__BITS(5,3)
#define		EPTP_FLAGS_AD			__BIT(6)
#define		EPTP_PHYSADDR			__BITS(63,12)
#define VMCS_EOI_EXIT0				0x0000201C
#define VMCS_EOI_EXIT1				0x0000201E
#define VMCS_EOI_EXIT2				0x00002020
#define VMCS_EOI_EXIT3				0x00002022
#define VMCS_EPTP_LIST				0x00002024
#define VMCS_VMREAD_BITMAP			0x00002026
#define VMCS_VMWRITE_BITMAP			0x00002028
#define VMCS_VIRTUAL_EXCEPTION			0x0000202A
#define VMCS_XSS_EXIT_BITMAP			0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP			0x0000202E
#define VMCS_TSC_MULTIPLIER			0x00002032
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS		0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER			0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL		0x00002802
#define VMCS_GUEST_IA32_PAT			0x00002804
#define VMCS_GUEST_IA32_EFER			0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0			0x0000280A
#define VMCS_GUEST_PDPTE1			0x0000280C
#define VMCS_GUEST_PDPTE2			0x0000280E
#define VMCS_GUEST_PDPTE3			0x00002810
#define VMCS_GUEST_BNDCFGS			0x00002812
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT			0x00002C00
#define VMCS_HOST_IA32_EFER			0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL		0x00002C04
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS			0x00004000
#define		PIN_CTLS_INT_EXITING		__BIT(0)
#define		PIN_CTLS_NMI_EXITING		__BIT(3)
#define		PIN_CTLS_VIRTUAL_NMIS		__BIT(5)
#define		PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define		PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS			0x00004002
#define		PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define		PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define		PROC_CTLS_HLT_EXITING		__BIT(7)
#define		PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define		PROC_CTLS_MWAIT_EXITING		__BIT(10)
#define		PROC_CTLS_RDPMC_EXITING		__BIT(11)
#define		PROC_CTLS_RDTSC_EXITING		__BIT(12)
#define		PROC_CTLS_RCR3_EXITING		__BIT(15)
#define		PROC_CTLS_LCR3_EXITING		__BIT(16)
#define		PROC_CTLS_RCR8_EXITING		__BIT(19)
#define		PROC_CTLS_LCR8_EXITING		__BIT(20)
#define		PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define		PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define		PROC_CTLS_DR_EXITING		__BIT(23)
#define		PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define		PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define		PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define		PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define		PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define		PROC_CTLS_PAUSE_EXITING		__BIT(30)
#define		PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP			0x00004004
#define VMCS_PF_ERROR_MASK			0x00004006
#define VMCS_PF_ERROR_MATCH			0x00004008
#define VMCS_CR3_TARGET_COUNT			0x0000400A
#define VMCS_EXIT_CTLS				0x0000400C
#define		EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define		EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define		EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define		EXIT_CTLS_ACK_INTERRUPT		__BIT(15)
#define		EXIT_CTLS_SAVE_PAT		__BIT(18)
#define		EXIT_CTLS_LOAD_PAT		__BIT(19)
#define		EXIT_CTLS_SAVE_EFER		__BIT(20)
#define		EXIT_CTLS_LOAD_EFER		__BIT(21)
#define		EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define		EXIT_CTLS_CLEAR_BNDCFGS		__BIT(23)
#define		EXIT_CTLS_CONCEAL_PT		__BIT(24)
#define VMCS_EXIT_MSR_STORE_COUNT		0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT		0x00004010
#define VMCS_ENTRY_CTLS				0x00004012
#define		ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define		ENTRY_CTLS_LONG_MODE		__BIT(9)
#define		ENTRY_CTLS_SMM			__BIT(10)
#define		ENTRY_CTLS_DISABLE_DUAL		__BIT(11)
#define		ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define		ENTRY_CTLS_LOAD_PAT		__BIT(14)
#define		ENTRY_CTLS_LOAD_EFER		__BIT(15)
#define		ENTRY_CTLS_LOAD_BNDCFGS		__BIT(16)
#define		ENTRY_CTLS_CONCEAL_PT		__BIT(17)
#define VMCS_ENTRY_MSR_LOAD_COUNT		0x00004014
#define VMCS_ENTRY_INTR_INFO			0x00004016
#define		INTR_INFO_VECTOR		__BITS(7,0)
#define		INTR_INFO_TYPE			__BITS(10,8)
#define			INTR_TYPE_EXT_INT	0
#define			INTR_TYPE_NMI		2
#define			INTR_TYPE_HW_EXC	3
#define			INTR_TYPE_SW_INT	4
#define			INTR_TYPE_PRIV_SW_EXC	5
#define			INTR_TYPE_SW_EXC	6
#define			INTR_TYPE_OTHER		7
#define		INTR_INFO_ERROR			__BIT(11)
#define		INTR_INFO_VALID			__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR		0x00004018
#define VMCS_ENTRY_INST_LENGTH			0x0000401A
#define VMCS_TPR_THRESHOLD			0x0000401C
#define VMCS_PROCBASED_CTLS2			0x0000401E
#define		PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define		PROC_CTLS2_ENABLE_EPT		__BIT(1)
#define		PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define		PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define		PROC_CTLS2_VIRT_X2APIC		__BIT(4)
#define		PROC_CTLS2_ENABLE_VPID		__BIT(5)
#define		PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define		PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define		PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define		PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define		PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define		PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define		PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define		PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define		PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define		PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define		PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define		PROC_CTLS2_PML_ENABLE		__BIT(17)
#define		PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define		PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define		PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define		PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define		PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define VMCS_PLE_GAP				0x00004020
#define VMCS_PLE_WINDOW				0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR			0x00004400
#define VMCS_EXIT_REASON			0x00004402
#define VMCS_EXIT_INTR_INFO			0x00004404
#define VMCS_EXIT_INTR_ERRCODE			0x00004406
#define VMCS_IDT_VECTORING_INFO			0x00004408
#define VMCS_IDT_VECTORING_ERROR		0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH		0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO		0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT			0x00004800
#define VMCS_GUEST_CS_LIMIT			0x00004802
#define VMCS_GUEST_SS_LIMIT			0x00004804
#define VMCS_GUEST_DS_LIMIT			0x00004806
#define VMCS_GUEST_FS_LIMIT			0x00004808
#define VMCS_GUEST_GS_LIMIT			0x0000480A
#define VMCS_GUEST_LDTR_LIMIT			0x0000480C
#define VMCS_GUEST_TR_LIMIT			0x0000480E
#define VMCS_GUEST_GDTR_LIMIT			0x00004810
#define VMCS_GUEST_IDTR_LIMIT			0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS		0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS		0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS		0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS		0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS		0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS		0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS		0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS		0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY		0x00004824
#define		INT_STATE_STI			__BIT(0)
#define		INT_STATE_MOVSS			__BIT(1)
#define		INT_STATE_SMI			__BIT(2)
#define		INT_STATE_NMI			__BIT(3)
#define		INT_STATE_ENCLAVE		__BIT(4)
#define VMCS_GUEST_ACTIVITY			0x00004826
#define VMCS_GUEST_SMBASE			0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS		0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE		0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS		0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK				0x00006000
#define VMCS_CR4_MASK				0x00006002
#define VMCS_CR0_SHADOW				0x00006004
#define VMCS_CR4_SHADOW				0x00006006
#define VMCS_CR3_TARGET0			0x00006008
#define VMCS_CR3_TARGET1			0x0000600A
#define VMCS_CR3_TARGET2			0x0000600C
#define VMCS_CR3_TARGET3			0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION			0x00006400
#define VMCS_IO_RCX				0x00006402
#define VMCS_IO_RSI				0x00006404
#define VMCS_IO_RDI				0x00006406
#define VMCS_IO_RIP				0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS		0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0				0x00006800
#define VMCS_GUEST_CR3				0x00006802
#define VMCS_GUEST_CR4				0x00006804
#define VMCS_GUEST_ES_BASE			0x00006806
#define VMCS_GUEST_CS_BASE			0x00006808
#define VMCS_GUEST_SS_BASE			0x0000680A
#define VMCS_GUEST_DS_BASE			0x0000680C
#define VMCS_GUEST_FS_BASE			0x0000680E
#define VMCS_GUEST_GS_BASE			0x00006810
#define VMCS_GUEST_LDTR_BASE			0x00006812
#define VMCS_GUEST_TR_BASE			0x00006814
#define VMCS_GUEST_GDTR_BASE			0x00006816
#define VMCS_GUEST_IDTR_BASE			0x00006818
#define VMCS_GUEST_DR7				0x0000681A
#define VMCS_GUEST_RSP				0x0000681C
#define VMCS_GUEST_RIP				0x0000681E
#define VMCS_GUEST_RFLAGS			0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP		0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP		0x00006826
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0				0x00006C00
#define VMCS_HOST_CR3				0x00006C02
#define VMCS_HOST_CR4				0x00006C04
#define VMCS_HOST_FS_BASE			0x00006C06
#define VMCS_HOST_GS_BASE			0x00006C08
#define VMCS_HOST_TR_BASE			0x00006C0A
#define VMCS_HOST_GDTR_BASE			0x00006C0C
#define VMCS_HOST_IDTR_BASE			0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP		0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP		0x00006C12
#define VMCS_HOST_RSP				0x00006C14
#define VMCS_HOST_RIP				0x00006C16

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI			0
#define VMCS_EXITCODE_EXT_INT			1
#define VMCS_EXITCODE_SHUTDOWN			2
#define VMCS_EXITCODE_INIT			3
#define VMCS_EXITCODE_SIPI			4
#define VMCS_EXITCODE_SMI			5
#define VMCS_EXITCODE_OTHER_SMI			6
#define VMCS_EXITCODE_INT_WINDOW		7
#define VMCS_EXITCODE_NMI_WINDOW		8
#define VMCS_EXITCODE_TASK_SWITCH		9
#define VMCS_EXITCODE_CPUID			10
#define VMCS_EXITCODE_GETSEC			11
#define VMCS_EXITCODE_HLT			12
#define VMCS_EXITCODE_INVD			13
#define VMCS_EXITCODE_INVLPG			14
#define VMCS_EXITCODE_RDPMC			15
#define VMCS_EXITCODE_RDTSC			16
#define VMCS_EXITCODE_RSM			17
#define VMCS_EXITCODE_VMCALL			18
#define VMCS_EXITCODE_VMCLEAR			19
#define VMCS_EXITCODE_VMLAUNCH			20
#define VMCS_EXITCODE_VMPTRLD			21
#define VMCS_EXITCODE_VMPTRST			22
#define VMCS_EXITCODE_VMREAD			23
#define VMCS_EXITCODE_VMRESUME			24
#define VMCS_EXITCODE_VMWRITE			25
#define VMCS_EXITCODE_VMXOFF			26
#define VMCS_EXITCODE_VMXON			27
#define VMCS_EXITCODE_CR			28
#define VMCS_EXITCODE_DR			29
#define VMCS_EXITCODE_IO			30
#define VMCS_EXITCODE_RDMSR			31
#define VMCS_EXITCODE_WRMSR			32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID		34
#define VMCS_EXITCODE_MWAIT			36
#define VMCS_EXITCODE_TRAP_FLAG			37
#define VMCS_EXITCODE_MONITOR			39
#define VMCS_EXITCODE_PAUSE			40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW			43
#define VMCS_EXITCODE_APIC_ACCESS		44
#define VMCS_EXITCODE_VEOI			45
#define VMCS_EXITCODE_GDTR_IDTR			46
#define VMCS_EXITCODE_LDTR_TR			47
#define VMCS_EXITCODE_EPT_VIOLATION		48
#define VMCS_EXITCODE_EPT_MISCONFIG		49
#define VMCS_EXITCODE_INVEPT			50
#define VMCS_EXITCODE_RDTSCP			51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT		52
#define VMCS_EXITCODE_INVVPID			53
#define VMCS_EXITCODE_WBINVD			54
#define VMCS_EXITCODE_XSETBV			55
#define VMCS_EXITCODE_APIC_WRITE		56
#define VMCS_EXITCODE_RDRAND			57
#define VMCS_EXITCODE_INVPCID			58
#define VMCS_EXITCODE_VMFUNC			59
#define VMCS_EXITCODE_ENCLS			60
#define VMCS_EXITCODE_RDSEED			61
#define VMCS_EXITCODE_PAGE_LOG_FULL		62
#define VMCS_EXITCODE_XSAVES			63
#define VMCS_EXITCODE_XRSTORS			64
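/*
 * Illustrative sketch, not part of the driver: an EPTP value is composed
 * from the VMCS_EPTP field encodings above. A hypothetical helper
 * (compiled out), assuming a WB-capable CPU, a 4-level walk and a
 * page-aligned EPT root:
 */
#if 0	/* example only */
static uint64_t
vmx_example_make_eptp(paddr_t ept_root_pa)
{
	uint64_t eptp = 0;

	eptp |= __SHIFTIN(EPTP_TYPE_WB, EPTP_TYPE);	/* memory type */
	eptp |= __SHIFTIN(3, EPTP_WALKLEN);	/* walk length, minus one */
	eptp |= EPTP_FLAGS_AD;			/* A/D bits, if supported */
	eptp |= (ept_root_pa & EPTP_PHYSADDR);	/* root of the EPT tree */
	return eptp;
}
#endif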
/* -------------------------------------------------------------------------- */

#define VMX_MSRLIST_STAR		0
#define VMX_MSRLIST_LSTAR		1
#define VMX_MSRLIST_CSTAR		2
#define VMX_MSRLIST_SFMASK		3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR		5
#define VMX_MSRLIST_L1DFLUSH		5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;
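/*
 * Illustrative sketch, not part of the driver: the VM-entry/exit MSR
 * load/store lists pointed to by VMCS_ENTRY_MSR_LOAD_ADDRESS and friends
 * are arrays of this msr_entry layout. A hypothetical snippet (compiled
 * out) filling the guest STAR slot of such an array:
 */
#if 0	/* example only */
static void
vmx_example_fill_star(struct msr_entry *gmsr)
{
	gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
	gmsr[VMX_MSRLIST_STAR].rsvd = 0;
	gmsr[VMX_MSRLIST_STAR].val = 0;	/* loaded into the guest on entry */
}
#endif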

struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

extern bool pmap_ept_has_ad;

#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static kmutex_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)
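/*
 * Illustrative sketch, not part of the driver: the *_TLB_FLUSH masks
 * above name the bits whose modification requires flushing the guest
 * TLB. A hypothetical test (compiled out) over an old and a new
 * register value:
 */
#if 0	/* example only */
static bool
vmx_example_efer_needs_flush(uint64_t oldval, uint64_t newval)
{
	return ((oldval ^ newval) & EFER_TLB_FLUSH) != 0;
}
#endif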
/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_x86_conf_cpuid cpuid[VMX_NCPUIDS];
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_X86_CONF_CPUID] = sizeof(struct nvmm_x86_conf_cpuid)
};

struct vmx_cpudata {
	/* General */
	uint64_t asid;
	bool gtlb_want_flush;
	uint64_t vcpu_htlb_gen;
	kcpuset_t *htlb_want_flush;

	/* VMCS */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(64);

	/* Event state */
	bool int_window_exit;
	bool nmi_window_exit;

	/* Guest state */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t tsc_offset;
	struct xsave_header gfpu __aligned(64);
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	paddr_t oldpa __diagused;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
#ifdef DIAGNOSTIC
		KASSERT(kpreempt_disabled());
		vmx_vmptrst(&oldpa);
		KASSERT(oldpa == cpudata->vmcs_pa);
#endif
		return;
	}

	kpreempt_disable();

#ifdef DIAGNOSTIC
	vmx_vmptrst(&oldpa);
	KASSERT(oldpa == 0xFFFFFFFFFFFFFFFF);
#endif

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	paddr_t oldpa __diagused;

	KASSERT(kpreempt_disabled());
	KASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
#ifdef DIAGNOSTIC
		vmx_vmptrst(&oldpa);
		KASSERT(oldpa == cpudata->vmcs_pa);
#endif
		return;
	}

	vmx_vmclear(&cpudata->vmcs_pa);
	kpreempt_enable();
}
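/*
 * Illustrative sketch, not part of the driver: vmx_vmcs_enter and
 * vmx_vmcs_leave bracket every VMCS access; the refcount makes nesting
 * cheap, and VMPTRLD/VMCLEAR happen only at the outermost level. A
 * hypothetical caller (compiled out) showing the usage pattern:
 */
#if 0	/* example only */
static uint64_t
vmx_example_read_rip(struct nvmm_cpu *vcpu)
{
	uint64_t rip;

	vmx_vmcs_enter(vcpu);	/* kpreempt_disable + VMPTRLD */
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	vmx_vmcs_leave(vcpu);	/* VMCLEAR + kpreempt_enable */
	return rip;
}
#endif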

/* -------------------------------------------------------------------------- */

static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	vmx_vmread(VMCS_PROCBASED_CTLS, &ctls1);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static inline int
vmx_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_event *event)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = 0;
	uint64_t info, intstate, rflags;

	if (event->vector >= 256) {
		return EINVAL;
	}

	vmx_vmcs_enter(vcpu);

	switch (event->type) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = INTR_TYPE_EXT_INT;
		if (event->vector == 2) {
			type = INTR_TYPE_NMI;
		}
		vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
		if (type == INTR_TYPE_NMI) {
			if (cpudata->nmi_window_exit) {
				ret = EAGAIN;
				goto out;
			}
			vmx_event_waitexit_enable(vcpu, true);
		} else {
			vmx_vmread(VMCS_GUEST_RFLAGS, &rflags);
			if ((rflags & PSL_I) == 0 ||
			    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0) {
				vmx_event_waitexit_enable(vcpu, false);
				ret = EAGAIN;
				goto out;
			}
		}
		err = 0;
		break;
	case NVMM_EVENT_INTERRUPT_SW:
		ret = EINVAL;
		goto out;
	case NVMM_EVENT_EXCEPTION:
		if (event->vector == 2 || event->vector >= 32) {
			ret = EINVAL;
			goto out;
		}
		if (event->vector == 3 || event->vector == 0) {
			ret = EINVAL;
			goto out;
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_event_has_error(event->vector);
		break;
	default:
		ret = EAGAIN;
		goto out;
	}

	info =
	    __SHIFTIN(event->vector, INTR_INFO_VECTOR) |
	    __SHIFTIN(type, INTR_INFO_TYPE) |
	    __SHIFTIN(err, INTR_INFO_ERROR) |
	    __SHIFTIN(1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, event->u.error);

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 6;
	event.u.error = 0;

	ret = vmx_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 13;
	event.u.error = 0;

	ret = vmx_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);
	vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual;

	vmx_vmread(VMCS_EXIT_INTR_INFO, &qual);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_EXIT_NONE;
	return;

error:
	exit->reason = NVMM_EXIT_INVALID;
}

static void
vmx_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;

	switch (eax) {
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		vmx_vmread(VMCS_GUEST_CR4, &cr4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000005:
	case 0x00000006:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
		break;
	case 0x0000000D:
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_PES1_XSAVES;
			break;
		}
		break;
	case 0x40000000:
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	default:
		break;
	}
}

static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_machdata *machdata = mach->machdata;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x86_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->gprs[NVMM_X64_GPR_RAX];
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	for (i = 0; i < VMX_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	/* Overwrite non-tunable leaves. */
	vmx_inkernel_handle_cpuid(vcpu, eax, ecx);

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		vmx_vmread(VMCS_GUEST_RFLAGS, &rflags);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM		__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR		__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

static inline int
vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
{
	/* Bits set to 1 in fixed0 are fixed to 1. */
	if ((crval & fixed0) != fixed0) {
		return -1;
	}
	/* Bits set to 0 in fixed1 are fixed to 0. */
	if (crval & ~fixed1) {
		return -1;
	}
	return 0;
}
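/*
 * Worked example (illustrative; the concrete values vary by CPU): CR0
 * fixed0 commonly contains CR0_PE|CR0_NE|CR0_PG (unrestricted-guest
 * CPUs relax PE/PG), so a candidate CR0 with NE clear fails the first
 * test above, and a candidate setting a bit that is clear in fixed1
 * fails the second. The values come from the MSR_IA32_VMX_CR{0,4}_FIXED{0,1}
 * MSRs, cached in vmx_cr0_fixed0/1 and vmx_cr4_fixed0/1.
 */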

static int
vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr0;
	uint64_t efer, ctls1;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		vmx_vmread(VMCS_GUEST_RSP, &gpr);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr0 = gpr | CR0_NE | CR0_ET;
	cr0 &= ~(CR0_NW|CR0_CD);

	if (vmx_check_cr(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
		return -1;
	}

	/*
	 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
	 * from CR3.
	 */

	if (cr0 & CR0_PG) {
		vmx_vmread(VMCS_ENTRY_CTLS, &ctls1);
		vmx_vmread(VMCS_GUEST_IA32_EFER, &efer);
		if (efer & EFER_LME) {
			ctls1 |= ENTRY_CTLS_LONG_MODE;
			efer |= EFER_LMA;
		} else {
			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
			efer &= ~EFER_LMA;
		}
		vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
	}

	vmx_vmwrite(VMCS_GUEST_CR0, cr0);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr, cr4;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type != CR_TYPE_WRITE) {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (gpr == NVMM_X64_GPR_RSP) {
		vmx_vmread(VMCS_GUEST_RSP, &gpr);
	} else {
		gpr = cpudata->gprs[gpr];
	}

	cr4 = gpr | CR4_VMXE;

	if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
		return -1;
	}

	vmx_vmwrite(VMCS_GUEST_CR4, cr4);
	vmx_inkernel_advance();
	return 0;
}

static int
vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint64_t qual)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t type, gpr;
	bool write;

	type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
	if (type == CR_TYPE_WRITE) {
		write = true;
	} else if (type == CR_TYPE_READ) {
		write = false;
	} else {
		return -1;
	}

	gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
	KASSERT(gpr < 16);

	if (write) {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmread(VMCS_GUEST_RSP, &cpudata->gcr8);
		} else {
			cpudata->gcr8 = cpudata->gprs[gpr];
		}
	} else {
		if (gpr == NVMM_X64_GPR_RSP) {
			vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
		} else {
			cpudata->gprs[gpr] = cpudata->gcr8;
		}
	}

	vmx_inkernel_advance();
	return 0;
}

static void
vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual;
	int ret;

	vmx_vmread(VMCS_EXIT_QUALIFICATION, &qual);

	switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
	case 0:
		ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
		break;
	case 4:
		ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
		break;
	case 8:
		ret = vmx_inkernel_handle_cr8(mach, vcpu, qual);
		break;
	default:
		ret = -1;
		break;
	}

	if (ret == -1) {
		vmx_inject_gp(mach, vcpu);
	}

	exit->reason = NVMM_EXIT_NONE;
}

#define VMX_QUAL_IO_SIZE	__BITS(2,0)
#define		IO_SIZE_8	0
#define		IO_SIZE_16	1
#define		IO_SIZE_32	3
#define VMX_QUAL_IO_IN		__BIT(3)
#define VMX_QUAL_IO_STR		__BIT(4)
#define VMX_QUAL_IO_REP		__BIT(5)
#define VMX_QUAL_IO_DX		__BIT(6)
#define VMX_QUAL_IO_PORT	__BITS(31,16)

#define VMX_INFO_IO_ADRSIZE	__BITS(9,7)
#define		IO_ADRSIZE_16	0
#define		IO_ADRSIZE_32	1
#define		IO_ADRSIZE_64	2
#define VMX_INFO_IO_SEG		__BITS(17,15)
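/*
 * Illustrative sketch, not part of the driver: the exit qualification
 * and instruction info of an I/O exit decode with the fields above. A
 * hypothetical helper (compiled out) extracting port and direction:
 */
#if 0	/* example only */
static void
vmx_example_decode_io(uint64_t qual, uint16_t *port, bool *in)
{
	*port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);
	*in = (qual & VMX_QUAL_IO_IN) != 0;
}
#endif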
static void
vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	uint64_t qual, info, inslen, rip;

	vmx_vmread(VMCS_EXIT_QUALIFICATION, &qual);
	vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO, &info);

	exit->reason = NVMM_EXIT_IO;

	if (qual & VMX_QUAL_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);

	KASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
	exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);

	if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
		exit->u.io.address_size = 8;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
		exit->u.io.address_size = 4;
	} else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
		exit->u.io.address_size = 2;
	}

	if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
		exit->u.io.operand_size = 4;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
		exit->u.io.operand_size = 2;
	} else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
	exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;

	if ((exit->u.io.type == NVMM_EXIT_IO_IN) && exit->u.io.str) {
		exit->u.io.seg = NVMM_X64_SEG_ES;
	}

	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	exit->u.io.npc = rip + inslen;
}

static const uint64_t msr_ignore_list[] = {
	MSR_BIOS_SIGN,
	MSR_IA32_PLATFORM_ID
};

static bool
vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;
	size_t i;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_CR_PAT) {
			vmx_vmread(VMCS_GUEST_IA32_PAT, &val);
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
			val = cpudata->gmsr_misc_enable;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			val = 0;
			cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_TSC) {
			cpudata->tsc_offset = exit->u.msr.val - cpu_counter();
			vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->tsc_offset +
			    curcpu()->ci_data.cpu_cc_skew);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_CR_PAT) {
			vmx_vmwrite(VMCS_GUEST_IA32_PAT, exit->u.msr.val);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_MISC_ENABLE) {
			/* Don't care. */
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			goto handled;
		}
		break;
	}

	return false;

handled:
	vmx_inkernel_advance();
	return true;
}

static void
vmx_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit, bool rdmsr)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t inslen, rip;

	if (rdmsr) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (rdmsr) {
		exit->u.msr.val = 0;
	} else {
		uint64_t rdx, rax;
		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->gprs[NVMM_X64_GPR_RAX];
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	}

	if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH, &inslen);
	vmx_vmread(VMCS_GUEST_RIP, &rip);
	exit->u.msr.npc = rip + inslen;
}

static void
vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t val;	/* XCR0 candidate, built from RDX:RAX */
   1508 
   1509 	exit->reason = NVMM_EXIT_NONE;
   1510 
   1511 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
   1512 	    (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
   1513 
   1514 	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
   1515 		goto error;
   1516 	} else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
   1517 		goto error;
   1518 	} else if (__predict_false((val & XCR0_X87) == 0)) {
   1519 		goto error;
   1520 	}
   1521 
   1522 	cpudata->gxcr0 = val;
   1523 
   1524 	vmx_inkernel_advance();
   1525 	return;
   1526 
   1527 error:
   1528 	vmx_inject_gp(mach, vcpu);
   1529 }
   1530 
   1531 #define VMX_EPT_VIOLATION_READ		__BIT(0)
   1532 #define VMX_EPT_VIOLATION_WRITE		__BIT(1)
   1533 #define VMX_EPT_VIOLATION_EXECUTE	__BIT(2)
   1534 
   1535 static void
   1536 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1537     struct nvmm_exit *exit)
   1538 {
   1539 	uint64_t perm;
   1540 	gpaddr_t gpa;
   1541 
   1542 	vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &gpa);
   1543 
   1544 	exit->reason = NVMM_EXIT_MEMORY;
   1545 	vmx_vmread(VMCS_EXIT_QUALIFICATION, &perm);
   1546 	if (perm & VMX_EPT_VIOLATION_WRITE)
   1547 		exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
   1548 	else if (perm & VMX_EPT_VIOLATION_EXECUTE)
   1549 		exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
   1550 	else
   1551 		exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
   1552 	exit->u.mem.gpa = gpa;
   1553 	exit->u.mem.inst_len = 0;
   1554 }
   1555 
   1556 /* -------------------------------------------------------------------------- */
   1557 
   1558 static void
   1559 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
   1560 {
   1561 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1562 
   1563 	cpudata->ts_set = (rcr0() & CR0_TS) != 0;
   1564 
   1565 	fpu_area_save(&cpudata->hfpu, vmx_xcr0_mask);
   1566 	fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
   1567 
   1568 	if (vmx_xcr0_mask != 0) {
   1569 		cpudata->hxcr0 = rdxcr(0);
   1570 		wrxcr(0, cpudata->gxcr0);
   1571 	}
   1572 }
   1573 
   1574 static void
   1575 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
   1576 {
   1577 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1578 
   1579 	if (vmx_xcr0_mask != 0) {
   1580 		cpudata->gxcr0 = rdxcr(0);
   1581 		wrxcr(0, cpudata->hxcr0);
   1582 	}
   1583 
   1584 	fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
   1585 	fpu_area_restore(&cpudata->hfpu, vmx_xcr0_mask);
   1586 
   1587 	if (cpudata->ts_set) {
   1588 		stts();
   1589 	}
   1590 }
   1591 
   1592 static void
   1593 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
   1594 {
   1595 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1596 
   1597 	x86_dbregs_save(curlwp);
   1598 
   1599 	ldr7(0);
   1600 
   1601 	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
   1602 	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
   1603 	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
   1604 	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
   1605 	ldr6(cpudata->drs[NVMM_X64_DR_DR6]);
   1606 }
   1607 
   1608 static void
   1609 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
   1610 {
   1611 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1612 
   1613 	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
   1614 	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
   1615 	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
   1616 	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
   1617 	cpudata->drs[NVMM_X64_DR_DR6] = rdr6();
   1618 
   1619 	x86_dbregs_restore(curlwp);
   1620 }
   1621 
   1622 static void
   1623 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
   1624 {
   1625 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1626 
   1627 	/* This gets restored automatically by the CPU. */
   1628 	vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
   1629 	vmx_vmwrite(VMCS_HOST_CR3, rcr3());
   1630 	vmx_vmwrite(VMCS_HOST_CR4, rcr4());
   1631 
   1632 	/* Note: MSR_LSTAR is not static, because of SVS. */
   1633 	cpudata->lstar = rdmsr(MSR_LSTAR);
   1634 	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
   1635 }
   1636 
   1637 static void
   1638 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
   1639 {
   1640 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1641 
   1642 	wrmsr(MSR_STAR, cpudata->star);
   1643 	wrmsr(MSR_LSTAR, cpudata->lstar);
   1644 	wrmsr(MSR_CSTAR, cpudata->cstar);
   1645 	wrmsr(MSR_SFMASK, cpudata->sfmask);
   1646 	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
   1647 }
   1648 
   1649 /* -------------------------------------------------------------------------- */
   1650 
   1651 #define VMX_INVVPID_ADDRESS		0
   1652 #define VMX_INVVPID_CONTEXT		1
   1653 #define VMX_INVVPID_ALL			2
   1654 #define VMX_INVVPID_CONTEXT_NOGLOBAL	3
   1655 
   1656 #define VMX_INVEPT_CONTEXT		1
   1657 #define VMX_INVEPT_ALL			2
   1658 
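         /*
          * Guest and host TLB handling. The "gTLB" holds the VPID-tagged
          * guest translations: it is flushed with INVVPID when the VCPU
          * migrates to another host CPU (vmx_gtlb_catchup), or when
          * vcpu_setstate loads paging state that invalidates it. The "hTLB"
          * holds the EPT-tagged translations: when the EPT pmap changes,
          * vmx_tlb_flush() bumps machdata->mach_htlb_gen; a VCPU noticing a
          * newer generation in vmx_htlb_flush() marks every host CPU in
          * htlb_want_flush and INVEPTs locally, and the other CPUs catch up
          * in vmx_htlb_catchup() before their next VMENTRY.
          */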
   1659 static inline void
   1660 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1661 {
   1662 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1663 
   1664 	if (vcpu->hcpu_last != hcpu) {
   1665 		cpudata->gtlb_want_flush = true;
   1666 	}
   1667 }
   1668 
   1669 static inline void
   1670 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1671 {
   1672 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1673 	struct ept_desc ept_desc;
   1674 
   1675 	if (__predict_true(!kcpuset_isset(cpudata->htlb_want_flush, hcpu))) {
   1676 		return;
   1677 	}
   1678 
   1679 	vmx_vmread(VMCS_EPTP, &ept_desc.eptp);
   1680 	ept_desc.mbz = 0;
   1681 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1682 	kcpuset_clear(cpudata->htlb_want_flush, hcpu);
   1683 }
   1684 
   1685 static inline uint64_t
   1686 vmx_htlb_flush(struct vmx_machdata *machdata, struct vmx_cpudata *cpudata)
   1687 {
   1688 	struct ept_desc ept_desc;
   1689 	uint64_t machgen;
   1690 
   1691 	machgen = machdata->mach_htlb_gen;
   1692 	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
   1693 		return machgen;
   1694 	}
   1695 
   1696 	kcpuset_copy(cpudata->htlb_want_flush, kcpuset_running);
   1697 
   1698 	vmx_vmread(VMCS_EPTP, &ept_desc.eptp);
   1699 	ept_desc.mbz = 0;
   1700 	vmx_invept(vmx_ept_flush_op, &ept_desc);
   1701 
   1702 	return machgen;
   1703 }
   1704 
   1705 static inline void
   1706 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
   1707 {
   1708 	cpudata->vcpu_htlb_gen = machgen;
   1709 	kcpuset_clear(cpudata->htlb_want_flush, cpu_number());
   1710 }
   1711 
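         /*
          * Run the VCPU until an exit that userland must see. The loop below
          * re-enters the guest as long as the exit was fully handled
          * in-kernel (exit->reason == NVMM_EXIT_NONE) and the host has no
          * pending work (scheduler yield, soft interrupts, LW_USERRET).
          * VMLAUNCH is used for the first entry, VMRESUME once the VMCS has
          * been launched.
          */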
   1712 static int
   1713 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1714     struct nvmm_exit *exit)
   1715 {
   1716 	struct vmx_machdata *machdata = mach->machdata;
   1717 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   1718 	struct vpid_desc vpid_desc;
   1719 	struct cpu_info *ci;
   1720 	uint64_t exitcode;
   1721 	uint64_t intstate;
   1722 	uint64_t machgen;
   1723 	int hcpu, s, ret;
   1724 	bool launched = false;
   1725 
   1726 	vmx_vmcs_enter(vcpu);
   1727 	ci = curcpu();
   1728 	hcpu = cpu_number();
   1729 
   1730 	vmx_gtlb_catchup(vcpu, hcpu);
   1731 	vmx_htlb_catchup(vcpu, hcpu);
   1732 
   1733 	if (vcpu->hcpu_last != hcpu) {
   1734 		vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
   1735 		vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
   1736 		vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
   1737 		vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
   1738 		vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->tsc_offset +
   1739 		    curcpu()->ci_data.cpu_cc_skew);
   1740 		vcpu->hcpu_last = hcpu;
   1741 	}
   1742 
   1743 	vmx_vcpu_guest_dbregs_enter(vcpu);
   1744 	vmx_vcpu_guest_misc_enter(vcpu);
   1745 
   1746 	while (1) {
   1747 		if (cpudata->gtlb_want_flush) {
   1748 			vpid_desc.vpid = cpudata->asid;
   1749 			vpid_desc.addr = 0;
   1750 			vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
   1751 			cpudata->gtlb_want_flush = false;
   1752 		}
   1753 
   1754 		s = splhigh();
   1755 		machgen = vmx_htlb_flush(machdata, cpudata);
   1756 		vmx_vcpu_guest_fpu_enter(vcpu);
   1757 		lcr2(cpudata->gcr2);
   1758 		if (launched) {
   1759 			ret = vmx_vmresume(cpudata->gprs);
   1760 		} else {
   1761 			ret = vmx_vmlaunch(cpudata->gprs);
   1762 		}
   1763 		cpudata->gcr2 = rcr2();
   1764 		vmx_vcpu_guest_fpu_leave(vcpu);
   1765 		vmx_htlb_flush_ack(cpudata, machgen);
   1766 		splx(s);
   1767 
   1768 		if (__predict_false(ret != 0)) {
   1769 			exit->reason = NVMM_EXIT_INVALID;
   1770 			break;
   1771 		}
   1772 
   1773 		launched = true;
   1774 
   1775 		vmx_vmread(VMCS_EXIT_REASON, &exitcode);
   1776 		exitcode &= __BITS(15,0);
   1777 
   1778 		switch (exitcode) {
   1779 		case VMCS_EXITCODE_EXC_NMI:
   1780 			vmx_exit_exc_nmi(mach, vcpu, exit);
   1781 			break;
   1782 		case VMCS_EXITCODE_EXT_INT:
   1783 			exit->reason = NVMM_EXIT_NONE;
   1784 			break;
   1785 		case VMCS_EXITCODE_CPUID:
   1786 			vmx_exit_cpuid(mach, vcpu, exit);
   1787 			break;
   1788 		case VMCS_EXITCODE_HLT:
   1789 			vmx_exit_hlt(mach, vcpu, exit);
   1790 			break;
   1791 		case VMCS_EXITCODE_CR:
   1792 			vmx_exit_cr(mach, vcpu, exit);
   1793 			break;
   1794 		case VMCS_EXITCODE_IO:
   1795 			vmx_exit_io(mach, vcpu, exit);
   1796 			break;
   1797 		case VMCS_EXITCODE_RDMSR:
   1798 			vmx_exit_msr(mach, vcpu, exit, true);
   1799 			break;
   1800 		case VMCS_EXITCODE_WRMSR:
   1801 			vmx_exit_msr(mach, vcpu, exit, false);
   1802 			break;
   1803 		case VMCS_EXITCODE_SHUTDOWN:
   1804 			exit->reason = NVMM_EXIT_SHUTDOWN;
   1805 			break;
   1806 		case VMCS_EXITCODE_MONITOR:
   1807 			exit->reason = NVMM_EXIT_MONITOR;
   1808 			break;
   1809 		case VMCS_EXITCODE_MWAIT:
   1810 			exit->reason = NVMM_EXIT_MWAIT;
   1811 			break;
   1812 		case VMCS_EXITCODE_XSETBV:
   1813 			vmx_exit_xsetbv(mach, vcpu, exit);
   1814 			break;
   1815 		case VMCS_EXITCODE_RDPMC:
   1816 		case VMCS_EXITCODE_RDTSCP:
   1817 		case VMCS_EXITCODE_INVVPID:
   1818 		case VMCS_EXITCODE_INVEPT:
   1819 		case VMCS_EXITCODE_VMCALL:
   1820 		case VMCS_EXITCODE_VMCLEAR:
   1821 		case VMCS_EXITCODE_VMLAUNCH:
   1822 		case VMCS_EXITCODE_VMPTRLD:
   1823 		case VMCS_EXITCODE_VMPTRST:
   1824 		case VMCS_EXITCODE_VMREAD:
   1825 		case VMCS_EXITCODE_VMRESUME:
   1826 		case VMCS_EXITCODE_VMWRITE:
   1827 		case VMCS_EXITCODE_VMXOFF:
   1828 		case VMCS_EXITCODE_VMXON:
   1829 			vmx_inject_ud(mach, vcpu);
   1830 			exit->reason = NVMM_EXIT_NONE;
   1831 			break;
   1832 		case VMCS_EXITCODE_EPT_VIOLATION:
   1833 			vmx_exit_epf(mach, vcpu, exit);
   1834 			break;
   1835 		case VMCS_EXITCODE_INT_WINDOW:
   1836 			vmx_event_waitexit_disable(vcpu, false);
   1837 			exit->reason = NVMM_EXIT_INT_READY;
   1838 			break;
   1839 		case VMCS_EXITCODE_NMI_WINDOW:
   1840 			vmx_event_waitexit_disable(vcpu, true);
   1841 			exit->reason = NVMM_EXIT_NMI_READY;
   1842 			break;
   1843 		default:
   1844 			exit->reason = NVMM_EXIT_INVALID;
   1845 			break;
   1846 		}
   1847 
   1848 		/* If no reason to return to userland, keep rolling. */
   1849 		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
   1850 			break;
   1851 		}
   1852 		if (curcpu()->ci_data.cpu_softints != 0) {
   1853 			break;
   1854 		}
   1855 		if (curlwp->l_flag & LW_USERRET) {
   1856 			break;
   1857 		}
   1858 		if (exit->reason != NVMM_EXIT_NONE) {
   1859 			break;
   1860 		}
   1861 	}
   1862 
   1863 	vmx_vcpu_guest_misc_leave(vcpu);
   1864 	vmx_vcpu_guest_dbregs_leave(vcpu);
   1865 
   1866 	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = cpudata->gcr8;
   1867 	vmx_vmread(VMCS_GUEST_RFLAGS,
   1868 	    &exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS]);
   1869 	vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
   1870 	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
   1871 	    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   1872 	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
   1873 	    cpudata->int_window_exit;
   1874 	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
   1875 	    cpudata->nmi_window_exit;
   1876 
   1877 	vmx_vmcs_leave(vcpu);
   1878 
   1879 	return 0;
   1880 }
   1881 
   1882 /* -------------------------------------------------------------------------- */
   1883 
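         /*
          * Allocate physically contiguous, page-aligned wired memory, mapped
          * into KVA. The VMXON regions, VMCSs, MSR bitmaps and guest MSR
          * lists are all handed to the CPU by physical address, hence
          * uvm_pglistalloc() with a single segment rather than plain kmem.
          */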
   1884 static int
   1885 vmx_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
   1886 {
   1887 	struct pglist pglist;
   1888 	paddr_t _pa;
   1889 	vaddr_t _va;
   1890 	size_t i;
   1891 	int ret;
   1892 
   1893 	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
   1894 	    &pglist, 1, 0);
   1895 	if (ret != 0)
   1896 		return ENOMEM;
   1897 	_pa = TAILQ_FIRST(&pglist)->phys_addr;
   1898 	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
   1899 	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
   1900 	if (_va == 0)
   1901 		goto error;
   1902 
   1903 	for (i = 0; i < npages; i++) {
   1904 		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
   1905 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
   1906 	}
   1907 	pmap_update(pmap_kernel());
   1908 
   1909 	memset((void *)_va, 0, npages * PAGE_SIZE);
   1910 
   1911 	*pa = _pa;
   1912 	*va = _va;
   1913 	return 0;
   1914 
   1915 error:
   1916 	for (i = 0; i < npages; i++) {
   1917 		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
   1918 	}
   1919 	return ENOMEM;
   1920 }
   1921 
   1922 static void
   1923 vmx_memfree(paddr_t pa, vaddr_t va, size_t npages)
   1924 {
   1925 	size_t i;
   1926 
   1927 	pmap_kremove(va, npages * PAGE_SIZE);
   1928 	pmap_update(pmap_kernel());
   1929 	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
   1930 	for (i = 0; i < npages; i++) {
   1931 		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
   1932 	}
   1933 }
   1934 
   1935 /* -------------------------------------------------------------------------- */
   1936 
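         /*
          * MSR bitmap layout (one 4KB page, per the Intel SDM): bytes
          * [0..1023] control reads of MSRs 0x00000000-0x00001FFF, bytes
          * [1024..2047] reads of 0xC0000000-0xC0001FFF, and the same two
          * ranges repeat at +2048 for writes. A clear bit means no VMEXIT.
          * Worked example for MSR_LSTAR (0xC0000082), range 2:
          *
          *	byte   = ((0xC0000082 - 0xC0000000) / 8) + 1024	== 1040
          *	bitoff = 0xC0000082 & 0x7			== 2
          *
          * so reads are allowed by clearing bit 2 of bitmap[1040], and
          * writes by clearing bit 2 of bitmap[2048 + 1040].
          */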
   1937 static void
   1938 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
   1939 {
   1940 	uint64_t byte;
   1941 	uint8_t bitoff;
   1942 
   1943 	if (msr < 0x00002000) {
   1944 		/* Range 1 */
   1945 		byte = ((msr - 0x00000000) / 8) + 0;
   1946 	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
   1947 		/* Range 2 */
   1948 		byte = ((msr - 0xC0000000) / 8) + 1024;
   1949 	} else {
   1950 		panic("%s: wrong range", __func__);
   1951 	}
   1952 
   1953 	bitoff = (msr & 0x7);
   1954 
   1955 	if (read) {
   1956 		bitmap[byte] &= ~__BIT(bitoff);
   1957 	}
   1958 	if (write) {
   1959 		bitmap[2048 + byte] &= ~__BIT(bitoff);
   1960 	}
   1961 }
   1962 
   1963 #define VMX_SEG_ATTRIB_TYPE		__BITS(3,0)
   1964 #define VMX_SEG_ATTRIB_S		__BIT(4)
   1965 #define VMX_SEG_ATTRIB_DPL		__BITS(6,5)
   1966 #define VMX_SEG_ATTRIB_P		__BIT(7)
   1967 #define VMX_SEG_ATTRIB_AVL		__BIT(12)
   1968 #define VMX_SEG_ATTRIB_L		__BIT(13)
   1969 #define VMX_SEG_ATTRIB_DEF		__BIT(14)
   1970 #define VMX_SEG_ATTRIB_G		__BIT(15)
   1971 #define VMX_SEG_ATTRIB_UNUSABLE		__BIT(16)
   1972 
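         /*
          * Translate NVMM segment state to and from the VMCS access-rights
          * format described by the VMX_SEG_ATTRIB_* bits above. NVMM has no
          * explicit "unusable" flag: a segment whose present bit is clear is
          * installed as unusable, and an unusable segment is reported back
          * with present == 0. GDT and IDT carry no selector or attributes.
          */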
   1973 static void
   1974 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
   1975 {
   1976 	uint64_t attrib;
   1977 
   1978 	attrib =
   1979 	    __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
   1980 	    __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
   1981 	    __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
   1982 	    __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
   1983 	    __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
   1984 	    __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
   1985 	    __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
   1986 	    __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
   1987 	    (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
   1988 
   1989 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   1990 		vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
   1991 		vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
   1992 	}
   1993 	vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
   1994 	vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
   1995 }
   1996 
   1997 static void
   1998 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
   1999 {
   2000 	uint64_t selector, base, limit, attrib = 0;
   2001 
   2002 	if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
   2003 		vmx_vmread(vmx_guest_segs[idx].selector, &selector);
   2004 		vmx_vmread(vmx_guest_segs[idx].attrib, &attrib);
   2005 	}
   2006 	vmx_vmread(vmx_guest_segs[idx].limit, &limit);
   2007 	vmx_vmread(vmx_guest_segs[idx].base, &base);
   2008 
   2009 	segs[idx].selector = selector;
   2010 	segs[idx].limit = limit;
   2011 	segs[idx].base = base;
   2012 	segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
   2013 	segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
   2014 	segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
   2015 	segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
   2016 	segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
   2017 	segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
   2018 	segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
   2019 	segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
   2020 	if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
   2021 		segs[idx].attrib.p = 0;
   2022 	}
   2023 }
   2024 
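         /*
          * Decide whether loading the given state must flush the guest TLB:
          * a change in the paging-related bits of CR0 or CR4 (per the
          * CR0_TLB_FLUSH/CR4_TLB_FLUSH masks defined earlier in this file),
          * any change of CR3, or a change in the paging-related EFER bits.
          * Otherwise stale VPID-tagged translations could be reused.
          */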
   2025 static inline bool
   2026 vmx_state_tlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
   2027 {
   2028 	uint64_t cr0, cr3, cr4, efer;
   2029 
   2030 	if (flags & NVMM_X64_STATE_CRS) {
   2031 		vmx_vmread(VMCS_GUEST_CR0, &cr0);
   2032 		if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
   2033 			return true;
   2034 		}
   2035 		vmx_vmread(VMCS_GUEST_CR3, &cr3);
   2036 		if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
   2037 			return true;
   2038 		}
   2039 		vmx_vmread(VMCS_GUEST_CR4, &cr4);
   2040 		if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
   2041 			return true;
   2042 		}
   2043 	}
   2044 
   2045 	if (flags & NVMM_X64_STATE_MSRS) {
   2046 		vmx_vmread(VMCS_GUEST_IA32_EFER, &efer);
   2047 		if ((efer ^
   2048 		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
   2049 			return true;
   2050 		}
   2051 	}
   2052 
   2053 	return false;
   2054 }
   2055 
   2056 static void
   2057 vmx_vcpu_setstate(struct nvmm_cpu *vcpu, const void *data, uint64_t flags)
   2058 {
   2059 	const struct nvmm_x64_state *state = data;
   2060 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2061 	struct fxsave *fpustate;
   2062 	uint64_t ctls1, intstate;
   2063 
   2064 	vmx_vmcs_enter(vcpu);
   2065 
   2066 	if (vmx_state_tlb_flush(state, flags)) {
   2067 		cpudata->gtlb_want_flush = true;
   2068 	}
   2069 
   2070 	if (flags & NVMM_X64_STATE_SEGS) {
   2071 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
   2072 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
   2073 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
   2074 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
   2075 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
   2076 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
   2077 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2078 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2079 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2080 		vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
   2081 	}
   2082 
   2083 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2084 	if (flags & NVMM_X64_STATE_GPRS) {
   2085 		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
   2086 
   2087 		vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
   2088 		vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
   2089 		vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
   2090 	}
   2091 
   2092 	if (flags & NVMM_X64_STATE_CRS) {
   2093 		/*
   2094 		 * CR0_NE and CR4_VMXE are mandatory.
   2095 		 */
   2096 		vmx_vmwrite(VMCS_GUEST_CR0,
   2097 		    state->crs[NVMM_X64_CR_CR0] | CR0_NE);
   2098 		cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
   2099 		vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]); // XXX PDPTE?
   2100 		vmx_vmwrite(VMCS_GUEST_CR4,
   2101 		    state->crs[NVMM_X64_CR_CR4] | CR4_VMXE);
   2102 		cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
   2103 
   2104 		if (vmx_xcr0_mask != 0) {
   2105 			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
   2106 			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
   2107 			cpudata->gxcr0 &= vmx_xcr0_mask;
   2108 			cpudata->gxcr0 |= XCR0_X87;
   2109 		}
   2110 	}
   2111 
   2112 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2113 	if (flags & NVMM_X64_STATE_DRS) {
   2114 		memcpy(cpudata->drs, state->drs, sizeof(state->drs));
   2115 
   2116 		cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
   2117 		vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
   2118 	}
   2119 
   2120 	if (flags & NVMM_X64_STATE_MSRS) {
   2121 		cpudata->gmsr[VMX_MSRLIST_STAR].val =
   2122 		    state->msrs[NVMM_X64_MSR_STAR];
   2123 		cpudata->gmsr[VMX_MSRLIST_LSTAR].val =
   2124 		    state->msrs[NVMM_X64_MSR_LSTAR];
   2125 		cpudata->gmsr[VMX_MSRLIST_CSTAR].val =
   2126 		    state->msrs[NVMM_X64_MSR_CSTAR];
   2127 		cpudata->gmsr[VMX_MSRLIST_SFMASK].val =
   2128 		    state->msrs[NVMM_X64_MSR_SFMASK];
   2129 		cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val =
   2130 		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
   2131 
   2132 		vmx_vmwrite(VMCS_GUEST_IA32_EFER,
   2133 		    state->msrs[NVMM_X64_MSR_EFER]);
   2134 		vmx_vmwrite(VMCS_GUEST_IA32_PAT,
   2135 		    state->msrs[NVMM_X64_MSR_PAT]);
   2136 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
   2137 		    state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
   2138 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
   2139 		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
   2140 		vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
   2141 		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
   2142 
   2143 		/* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
   2144 		vmx_vmread(VMCS_ENTRY_CTLS, &ctls1);
   2145 		if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
   2146 			ctls1 |= ENTRY_CTLS_LONG_MODE;
   2147 		} else {
   2148 			ctls1 &= ~ENTRY_CTLS_LONG_MODE;
   2149 		}
   2150 		vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
   2151 	}
   2152 
   2153 	if (flags & NVMM_X64_STATE_MISC) {
   2154 		vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
   2155 		intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
   2156 		if (state->misc[NVMM_X64_MISC_INT_SHADOW]) {
   2157 			intstate |= INT_STATE_MOVSS;
   2158 		}
   2159 		vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
   2160 
   2161 		if (state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT]) {
   2162 			vmx_event_waitexit_enable(vcpu, false);
   2163 		} else {
   2164 			vmx_event_waitexit_disable(vcpu, false);
   2165 		}
   2166 
   2167 		if (state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT]) {
   2168 			vmx_event_waitexit_enable(vcpu, true);
   2169 		} else {
   2170 			vmx_event_waitexit_disable(vcpu, true);
   2171 		}
   2172 	}
   2173 
   2174 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2175 	if (flags & NVMM_X64_STATE_FPU) {
   2176 		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
   2177 		    sizeof(state->fpu));
   2178 
   2179 		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
   2180 		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
   2181 		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
   2182 
   2183 		if (vmx_xcr0_mask != 0) {
   2184 			/* Reset XSTATE_BV, to force a reload. */
   2185 			cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2186 		}
   2187 	}
   2188 
   2189 	vmx_vmcs_leave(vcpu);
   2190 }
   2191 
   2192 static void
   2193 vmx_vcpu_getstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
   2194 {
   2195 	struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
   2196 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2197 	uint64_t intstate;
   2198 
   2199 	vmx_vmcs_enter(vcpu);
   2200 
   2201 	if (flags & NVMM_X64_STATE_SEGS) {
   2202 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
   2203 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
   2204 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
   2205 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
   2206 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
   2207 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
   2208 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
   2209 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
   2210 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
   2211 		vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
   2212 	}
   2213 
   2214 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   2215 	if (flags & NVMM_X64_STATE_GPRS) {
   2216 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   2217 
   2218 		vmx_vmread(VMCS_GUEST_RIP, &state->gprs[NVMM_X64_GPR_RIP]);
   2219 		vmx_vmread(VMCS_GUEST_RSP, &state->gprs[NVMM_X64_GPR_RSP]);
   2220 		vmx_vmread(VMCS_GUEST_RFLAGS, &state->gprs[NVMM_X64_GPR_RFLAGS]);
   2221 	}
   2222 
   2223 	if (flags & NVMM_X64_STATE_CRS) {
   2224 		vmx_vmread(VMCS_GUEST_CR0, &state->crs[NVMM_X64_CR_CR0]);
   2225 		state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
   2226 		vmx_vmread(VMCS_GUEST_CR3, &state->crs[NVMM_X64_CR_CR3]);
   2227 		vmx_vmread(VMCS_GUEST_CR4, &state->crs[NVMM_X64_CR_CR4]);
   2228 		state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
   2229 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   2230 
   2231 		/* Hide VMXE. */
   2232 		state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
   2233 	}
   2234 
   2235 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   2236 	if (flags & NVMM_X64_STATE_DRS) {
   2237 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   2238 
   2239 		vmx_vmread(VMCS_GUEST_DR7, &state->drs[NVMM_X64_DR_DR7]);
   2240 	}
   2241 
   2242 	if (flags & NVMM_X64_STATE_MSRS) {
   2243 		state->msrs[NVMM_X64_MSR_STAR] =
   2244 		    cpudata->gmsr[VMX_MSRLIST_STAR].val;
   2245 		state->msrs[NVMM_X64_MSR_LSTAR] =
   2246 		    cpudata->gmsr[VMX_MSRLIST_LSTAR].val;
   2247 		state->msrs[NVMM_X64_MSR_CSTAR] =
   2248 		    cpudata->gmsr[VMX_MSRLIST_CSTAR].val;
   2249 		state->msrs[NVMM_X64_MSR_SFMASK] =
   2250 		    cpudata->gmsr[VMX_MSRLIST_SFMASK].val;
   2251 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   2252 		    cpudata->gmsr[VMX_MSRLIST_KERNELGSBASE].val;
   2253 
   2254 		vmx_vmread(VMCS_GUEST_IA32_EFER,
   2255 		    &state->msrs[NVMM_X64_MSR_EFER]);
   2256 		vmx_vmread(VMCS_GUEST_IA32_PAT,
   2257 		    &state->msrs[NVMM_X64_MSR_PAT]);
   2258 		vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS,
   2259 		    &state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
   2260 		vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP,
   2261 		    &state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
   2262 		vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP,
   2263 		    &state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
   2264 	}
   2265 
   2266 	if (flags & NVMM_X64_STATE_MISC) {
   2267 		vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY, &intstate);
   2268 		state->misc[NVMM_X64_MISC_INT_SHADOW] =
   2269 		    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
   2270 
   2271 		state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT] =
   2272 		    cpudata->int_window_exit;
   2273 		state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT] =
   2274 		    cpudata->nmi_window_exit;
   2275 	}
   2276 
   2277 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   2278 	if (flags & NVMM_X64_STATE_FPU) {
   2279 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   2280 		    sizeof(state->fpu));
   2281 	}
   2282 
   2283 	vmx_vmcs_leave(vcpu);
   2284 }
   2285 
   2286 /* -------------------------------------------------------------------------- */
   2287 
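         /*
          * VPID ("ASID") management: a bitmap of vmx_maxasid entries,
          * scanned linearly. The chosen VPID is written into the VMCS, so
          * vmx_asid_free() can recover it with a VMREAD instead of tracking
          * it separately.
          */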
   2288 static void
   2289 vmx_asid_alloc(struct nvmm_cpu *vcpu)
   2290 {
   2291 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2292 	size_t i, oct, bit;
   2293 
   2294 	mutex_enter(&vmx_asidlock);
   2295 
   2296 	for (i = 0; i < vmx_maxasid; i++) {
   2297 		oct = i / 8;
   2298 		bit = i % 8;
   2299 
   2300 		if (vmx_asidmap[oct] & __BIT(bit)) {
   2301 			continue;
   2302 		}
   2303 
   2304 		cpudata->asid = i;
   2305 
   2306 		vmx_asidmap[oct] |= __BIT(bit);
   2307 		vmx_vmwrite(VMCS_VPID, i);
   2308 		mutex_exit(&vmx_asidlock);
   2309 		return;
   2310 	}
   2311 
   2312 	mutex_exit(&vmx_asidlock);
   2313 
   2314 	panic("%s: impossible", __func__);
   2315 }
   2316 
   2317 static void
   2318 vmx_asid_free(struct nvmm_cpu *vcpu)
   2319 {
   2320 	size_t oct, bit;
   2321 	uint64_t asid;
   2322 
   2323 	vmx_vmread(VMCS_VPID, &asid);
   2324 
   2325 	oct = asid / 8;
   2326 	bit = asid % 8;
   2327 
   2328 	mutex_enter(&vmx_asidlock);
   2329 	vmx_asidmap[oct] &= ~__BIT(bit);
   2330 	mutex_exit(&vmx_asidlock);
   2331 }
   2332 
   2333 static void
   2334 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2335 {
   2336 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2337 	struct vmcs *vmcs = cpudata->vmcs;
   2338 	struct msr_entry *gmsr = cpudata->gmsr;
   2339 	extern uint8_t vmx_resume_rip;
   2340 	uint64_t rev, eptp;
   2341 
   2342 	rev = vmx_get_revision();
   2343 
   2344 	memset(vmcs, 0, VMCS_SIZE);
   2345 	vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
   2346 	vmcs->abort = 0;
   2347 
   2348 	vmx_vmcs_enter(vcpu);
   2349 
   2350 	/* No link pointer. */
   2351 	vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
   2352 
   2353 	/* Install the CTLSs. */
   2354 	vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
   2355 	vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
   2356 	vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
   2357 	vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
   2358 	vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
   2359 
   2360 	/* Allow direct access to certain MSRs. */
   2361 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   2362 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
   2363 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   2364 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   2365 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   2366 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   2367 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   2368 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   2369 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   2370 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   2371 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   2372 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   2373 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   2374 	vmx_vcpu_msr_allow(cpudata->msrbm, MSR_IA32_ARCH_CAPABILITIES,
   2375 	    true, false);
   2376 	vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
   2377 
    2378 	/*
    2379 	 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This includes
    2380 	 * the L1D_FLUSH MSR, to mitigate L1TF (counted in only after vmx_init_l1tf()).
    2381 	 */
   2382 	gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
   2383 	gmsr[VMX_MSRLIST_STAR].val = 0;
   2384 	gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
   2385 	gmsr[VMX_MSRLIST_LSTAR].val = 0;
   2386 	gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
   2387 	gmsr[VMX_MSRLIST_CSTAR].val = 0;
   2388 	gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
   2389 	gmsr[VMX_MSRLIST_SFMASK].val = 0;
   2390 	gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
   2391 	gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
   2392 	gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
   2393 	gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
   2394 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
   2395 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
   2396 	vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
   2397 	vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
   2398 
   2399 	/* Force CR0_NW and CR0_CD to zero, CR0_ET to one. */
   2400 	vmx_vmwrite(VMCS_CR0_MASK, CR0_NW|CR0_CD|CR0_ET);
   2401 	vmx_vmwrite(VMCS_CR0_SHADOW, CR0_ET);
   2402 
   2403 	/* Force CR4_VMXE to zero. */
   2404 	vmx_vmwrite(VMCS_CR4_MASK, CR4_VMXE);
   2405 
   2406 	/* Set the Host state for resuming. */
   2407 	vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)&vmx_resume_rip);
   2408 	vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
   2409 	vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2410 	vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2411 	vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
   2412 	vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
   2413 	vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
   2414 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
   2415 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
   2416 	vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
   2417 	vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)idt);
   2418 	vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
   2419 	vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
   2420 	vmx_vmwrite(VMCS_HOST_CR0, rcr0());
   2421 
   2422 	/* Generate ASID. */
   2423 	vmx_asid_alloc(vcpu);
   2424 
   2425 	/* Enable Extended Paging, 4-Level. */
   2426 	eptp =
   2427 	    __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
   2428 	    __SHIFTIN(4-1, EPTP_WALKLEN) |
   2429 	    (pmap_ept_has_ad ? EPTP_FLAGS_AD : 0) |
   2430 	    mach->vm->vm_map.pmap->pm_pdirpa[0];
   2431 	vmx_vmwrite(VMCS_EPTP, eptp);
   2432 
   2433 	/* Init IA32_MISC_ENABLE. */
   2434 	cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
   2435 	cpudata->gmsr_misc_enable &=
   2436 	    ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
   2437 	cpudata->gmsr_misc_enable |=
   2438 	    (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
   2439 
   2440 	/* Init XSAVE header. */
   2441 	cpudata->gfpu.xsh_xstate_bv = vmx_xcr0_mask;
   2442 	cpudata->gfpu.xsh_xcomp_bv = 0;
   2443 
   2444 	/* Set guest TSC to zero, more or less. */
   2445 	cpudata->tsc_offset = -cpu_counter();
   2446 
   2447 	/* These MSRs are static. */
   2448 	cpudata->star = rdmsr(MSR_STAR);
   2449 	cpudata->cstar = rdmsr(MSR_CSTAR);
   2450 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   2451 
   2452 	/* Install the RESET state. */
   2453 	vmx_vcpu_setstate(vcpu, &nvmm_x86_reset_state, NVMM_X64_STATE_ALL);
   2454 
   2455 	vmx_vmcs_leave(vcpu);
   2456 }
   2457 
   2458 static int
   2459 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2460 {
   2461 	struct vmx_cpudata *cpudata;
   2462 	int error;
   2463 
   2464 	/* Allocate the VMX cpudata. */
   2465 	cpudata = (struct vmx_cpudata *)uvm_km_alloc(kernel_map,
   2466 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   2467 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   2468 	vcpu->cpudata = cpudata;
   2469 
   2470 	/* VMCS */
   2471 	error = vmx_memalloc(&cpudata->vmcs_pa, (vaddr_t *)&cpudata->vmcs,
   2472 	    VMCS_NPAGES);
   2473 	if (error)
   2474 		goto error;
   2475 
   2476 	/* MSR Bitmap */
   2477 	error = vmx_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   2478 	    MSRBM_NPAGES);
   2479 	if (error)
   2480 		goto error;
   2481 
   2482 	/* Guest MSR List */
   2483 	error = vmx_memalloc(&cpudata->gmsr_pa, (vaddr_t *)&cpudata->gmsr, 1);
   2484 	if (error)
   2485 		goto error;
   2486 
   2487 	kcpuset_create(&cpudata->htlb_want_flush, true);
   2488 
   2489 	/* Init the VCPU info. */
   2490 	vmx_vcpu_init(mach, vcpu);
   2491 
   2492 	return 0;
   2493 
   2494 error:
   2495 	if (cpudata->vmcs_pa) {
   2496 		vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
   2497 		    VMCS_NPAGES);
   2498 	}
   2499 	if (cpudata->msrbm_pa) {
   2500 		vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   2501 		    MSRBM_NPAGES);
   2502 	}
   2503 	if (cpudata->gmsr_pa) {
   2504 		vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2505 	}
   2506 
    2507 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
         	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2508 	return error;
   2509 }
   2510 
   2511 static void
   2512 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2513 {
   2514 	struct vmx_cpudata *cpudata = vcpu->cpudata;
   2515 
   2516 	vmx_vmcs_enter(vcpu);
   2517 	vmx_asid_free(vcpu);
   2518 	vmx_vmcs_leave(vcpu);
   2519 
   2520 	kcpuset_destroy(cpudata->htlb_want_flush);
   2521 
   2522 	vmx_memfree(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs, VMCS_NPAGES);
   2523 	vmx_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2524 	vmx_memfree(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
   2525 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2526 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2527 }
   2528 
   2529 /* -------------------------------------------------------------------------- */
   2530 
   2531 static void
   2532 vmx_tlb_flush(struct pmap *pm)
   2533 {
   2534 	struct nvmm_machine *mach = pm->pm_data;
   2535 	struct vmx_machdata *machdata = mach->machdata;
   2536 
   2537 	atomic_inc_64(&machdata->mach_htlb_gen);
   2538 
   2539 	/* Generates IPIs, which cause #VMEXITs. */
   2540 	pmap_tlb_shootdown(pmap_kernel(), -1, PG_G, TLBSHOOT_UPDATE);
   2541 }
   2542 
   2543 static void
   2544 vmx_machine_create(struct nvmm_machine *mach)
   2545 {
   2546 	struct pmap *pmap = mach->vm->vm_map.pmap;
   2547 	struct vmx_machdata *machdata;
   2548 
   2549 	/* Convert to EPT. */
   2550 	pmap_ept_transform(pmap);
   2551 
   2552 	/* Fill in pmap info. */
   2553 	pmap->pm_data = (void *)mach;
   2554 	pmap->pm_tlb_flush = vmx_tlb_flush;
   2555 
   2556 	machdata = kmem_zalloc(sizeof(struct vmx_machdata), KM_SLEEP);
   2557 	mach->machdata = machdata;
   2558 
   2559 	/* Start with an hTLB flush everywhere. */
   2560 	machdata->mach_htlb_gen = 1;
   2561 }
   2562 
   2563 static void
   2564 vmx_machine_destroy(struct nvmm_machine *mach)
   2565 {
   2566 	struct vmx_machdata *machdata = mach->machdata;
   2567 
   2568 	kmem_free(machdata, sizeof(struct vmx_machdata));
   2569 }
   2570 
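         /*
          * Configure CPUID emulation. Illustrative usage from the emulator
          * side: to hide, say, a feature bit in leaf 0x00000001 from the
          * guest, pass NVMM_X86_CONF_CPUID with cpuid->leaf = 0x00000001
          * and the bit set in the relevant del.* register mask. The set.*
          * and del.* masks must not overlap, and up to VMX_NCPUIDS leaves
          * can be overridden.
          */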
   2571 static int
   2572 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2573 {
   2574 	struct nvmm_x86_conf_cpuid *cpuid = data;
   2575 	struct vmx_machdata *machdata = (struct vmx_machdata *)mach->machdata;
   2576 	size_t i;
   2577 
   2578 	if (__predict_false(op != NVMM_X86_CONF_CPUID)) {
   2579 		return EINVAL;
   2580 	}
   2581 
   2582 	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
   2583 	    (cpuid->set.ebx & cpuid->del.ebx) ||
   2584 	    (cpuid->set.ecx & cpuid->del.ecx) ||
   2585 	    (cpuid->set.edx & cpuid->del.edx))) {
   2586 		return EINVAL;
   2587 	}
   2588 
   2589 	/* If already here, replace. */
   2590 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2591 		if (!machdata->cpuidpresent[i]) {
   2592 			continue;
   2593 		}
   2594 		if (machdata->cpuid[i].leaf == cpuid->leaf) {
   2595 			memcpy(&machdata->cpuid[i], cpuid,
   2596 			    sizeof(struct nvmm_x86_conf_cpuid));
   2597 			return 0;
   2598 		}
   2599 	}
   2600 
   2601 	/* Not here, insert. */
   2602 	for (i = 0; i < VMX_NCPUIDS; i++) {
   2603 		if (!machdata->cpuidpresent[i]) {
   2604 			machdata->cpuidpresent[i] = true;
   2605 			memcpy(&machdata->cpuid[i], cpuid,
   2606 			    sizeof(struct nvmm_x86_conf_cpuid));
   2607 			return 0;
   2608 		}
   2609 	}
   2610 
   2611 	return ENOBUFS;
   2612 }
   2613 
   2614 /* -------------------------------------------------------------------------- */
   2615 
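         /*
          * Build a CTLS value from the capability MSRs. For control bit i,
          * bit i of the MSR is the "allowed-0" setting (if set, the control
          * must be 1), and bit 32+i is the "allowed-1" setting (if clear,
          * the control must be 0). When both settings are allowed, the
          * requested set_one/set_zero value wins, with the default taken
          * from the non-TRUE MSR otherwise. Returns -1 when a required
          * setting cannot be honored on this CPU.
          */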
   2616 static int
   2617 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
   2618     uint64_t set_one, uint64_t set_zero, uint64_t *res)
   2619 {
   2620 	uint64_t basic, val, true_val;
   2621 	bool one_allowed, zero_allowed, has_true;
   2622 	size_t i;
   2623 
   2624 	basic = rdmsr(MSR_IA32_VMX_BASIC);
   2625 	has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
   2626 
   2627 	val = rdmsr(msr_ctls);
   2628 	if (has_true) {
   2629 		true_val = rdmsr(msr_true_ctls);
   2630 	} else {
   2631 		true_val = val;
   2632 	}
   2633 
   2634 #define ONE_ALLOWED(msrval, bitoff) \
   2635 	((msrval & __BIT(32 + bitoff)) != 0)
   2636 #define ZERO_ALLOWED(msrval, bitoff) \
   2637 	((msrval & __BIT(bitoff)) == 0)
   2638 
   2639 	for (i = 0; i < 32; i++) {
   2640 		one_allowed = ONE_ALLOWED(true_val, i);
   2641 		zero_allowed = ZERO_ALLOWED(true_val, i);
   2642 
   2643 		if (zero_allowed && !one_allowed) {
   2644 			if (set_one & __BIT(i))
   2645 				return -1;
   2646 			*res &= ~__BIT(i);
   2647 		} else if (one_allowed && !zero_allowed) {
   2648 			if (set_zero & __BIT(i))
   2649 				return -1;
   2650 			*res |= __BIT(i);
   2651 		} else {
   2652 			if (set_zero & __BIT(i)) {
   2653 				*res &= ~__BIT(i);
   2654 			} else if (set_one & __BIT(i)) {
   2655 				*res |= __BIT(i);
   2656 			} else if (!has_true) {
   2657 				*res &= ~__BIT(i);
   2658 			} else if (ZERO_ALLOWED(val, i)) {
   2659 				*res &= ~__BIT(i);
   2660 			} else if (ONE_ALLOWED(val, i)) {
   2661 				*res |= __BIT(i);
   2662 			} else {
   2663 				return -1;
   2664 			}
   2665 		}
   2666 	}
   2667 
   2668 	return 0;
   2669 }
   2670 
   2671 static bool
   2672 vmx_ident(void)
   2673 {
   2674 	uint64_t msr;
   2675 	int ret;
   2676 
   2677 	if (!(cpu_feature[1] & CPUID2_VMX)) {
   2678 		return false;
   2679 	}
   2680 
   2681 	msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
   2682 	if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
   2683 		return false;
   2684 	}
   2685 
   2686 	msr = rdmsr(MSR_IA32_VMX_BASIC);
   2687 	if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
   2688 		return false;
   2689 	}
   2690 	if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
   2691 		return false;
   2692 	}
   2693 
   2694 	/* PG and PE are reported, even if Unrestricted Guests is supported. */
   2695 	vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
   2696 	vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
   2697 	ret = vmx_check_cr(rcr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
   2698 	if (ret == -1) {
   2699 		return false;
   2700 	}
   2701 
   2702 	vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
   2703 	vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
   2704 	ret = vmx_check_cr(rcr4() | CR4_VMXE, vmx_cr4_fixed0, vmx_cr4_fixed1);
   2705 	if (ret == -1) {
   2706 		return false;
   2707 	}
   2708 
   2709 	/* Init the CTLSs right now, and check for errors. */
   2710 	ret = vmx_init_ctls(
   2711 	    MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
   2712 	    VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
   2713 	    &vmx_pinbased_ctls);
   2714 	if (ret == -1) {
   2715 		return false;
   2716 	}
   2717 	ret = vmx_init_ctls(
   2718 	    MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
   2719 	    VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
   2720 	    &vmx_procbased_ctls);
   2721 	if (ret == -1) {
   2722 		return false;
   2723 	}
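         	/* CTLS2 has no TRUE variant; the same MSR is passed twice. */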
   2724 	ret = vmx_init_ctls(
   2725 	    MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
   2726 	    VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
   2727 	    &vmx_procbased_ctls2);
   2728 	if (ret == -1) {
   2729 		return false;
   2730 	}
   2731 	ret = vmx_init_ctls(
   2732 	    MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
   2733 	    VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
   2734 	    &vmx_entry_ctls);
   2735 	if (ret == -1) {
   2736 		return false;
   2737 	}
   2738 	ret = vmx_init_ctls(
   2739 	    MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
   2740 	    VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
   2741 	    &vmx_exit_ctls);
   2742 	if (ret == -1) {
   2743 		return false;
   2744 	}
   2745 
   2746 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   2747 	if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
   2748 		return false;
   2749 	}
   2750 	if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
   2751 		return false;
   2752 	}
   2753 	if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
   2754 		return false;
   2755 	}
   2756 	if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
   2757 		pmap_ept_has_ad = true;
   2758 	} else {
   2759 		pmap_ept_has_ad = false;
   2760 	}
   2761 	if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
   2762 		return false;
   2763 	}
   2764 
   2765 	return true;
   2766 }
   2767 
   2768 static void
   2769 vmx_init_asid(uint32_t maxasid)
   2770 {
   2771 	size_t allocsz;
   2772 
   2773 	mutex_init(&vmx_asidlock, MUTEX_DEFAULT, IPL_NONE);
   2774 
   2775 	vmx_maxasid = maxasid;
   2776 	allocsz = roundup(maxasid, 8) / 8;
   2777 	vmx_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   2778 
   2779 	/* ASID 0 is reserved for the host. */
   2780 	vmx_asidmap[0] |= __BIT(0);
   2781 }
   2782 
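         /*
          * Cross-call handler to enable or disable VMX on the local CPU.
          * The ordering is mandatory: VMXON faults unless CR4.VMXE is
          * already set, and CR4.VMXE cannot be cleared while still in VMX
          * operation, hence VMXOFF first on the way down.
          */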
   2783 static void
   2784 vmx_change_cpu(void *arg1, void *arg2)
   2785 {
   2786 	struct cpu_info *ci = curcpu();
   2787 	bool enable = (bool)arg1;
   2788 	uint64_t cr4;
   2789 
   2790 	if (!enable) {
   2791 		vmx_vmxoff();
   2792 	}
   2793 
   2794 	cr4 = rcr4();
   2795 	if (enable) {
   2796 		cr4 |= CR4_VMXE;
   2797 	} else {
   2798 		cr4 &= ~CR4_VMXE;
   2799 	}
   2800 	lcr4(cr4);
   2801 
   2802 	if (enable) {
   2803 		vmx_vmxon(&vmxoncpu[cpu_index(ci)].pa);
   2804 	}
   2805 }
   2806 
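         /*
          * L1TF mitigation policy: if IA32_ARCH_CAPABILITIES reports
          * SKIP_L1DFL_VMENTRY, no flush is needed and nothing is done.
          * Otherwise, if CPUID enumerates the L1D_FLUSH command, the
          * IA32_FLUSH_CMD entry prepared in vmx_vcpu_init() is counted into
          * the VMENTRY MSR load list (by bumping vmx_msrlist_entry_nmsr),
          * so the L1D cache is flushed on every VMENTRY.
          */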
   2807 static void
   2808 vmx_init_l1tf(void)
   2809 {
   2810 	u_int descs[4];
   2811 	uint64_t msr;
   2812 
   2813 	if (cpuid_level < 7) {
   2814 		return;
   2815 	}
   2816 
   2817 	x86_cpuid(7, descs);
   2818 
   2819 	if (descs[3] & CPUID_SEF_ARCH_CAP) {
   2820 		msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
   2821 		if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
   2822 			/* No mitigation needed. */
   2823 			return;
   2824 		}
   2825 	}
   2826 
   2827 	if (descs[3] & CPUID_SEF_L1D_FLUSH) {
   2828 		/* Enable hardware mitigation. */
   2829 		vmx_msrlist_entry_nmsr += 1;
   2830 	}
   2831 }
   2832 
   2833 static void
   2834 vmx_init(void)
   2835 {
   2836 	CPU_INFO_ITERATOR cii;
   2837 	struct cpu_info *ci;
   2838 	uint64_t xc, msr;
   2839 	struct vmxon *vmxon;
   2840 	uint32_t revision;
   2841 	paddr_t pa;
   2842 	vaddr_t va;
   2843 	int error;
   2844 
   2845 	/* Init the ASID bitmap (VPID). */
   2846 	vmx_init_asid(VPID_MAX);
   2847 
   2848 	/* Init the XCR0 mask. */
   2849 	vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
   2850 
   2851 	/* Init the TLB flush op, the EPT flush op and the EPTP type. */
   2852 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
   2853 	if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
   2854 		vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
   2855 	} else {
   2856 		vmx_tlb_flush_op = VMX_INVVPID_ALL;
   2857 	}
   2858 	if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
   2859 		vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
   2860 	} else {
   2861 		vmx_ept_flush_op = VMX_INVEPT_ALL;
   2862 	}
   2863 	if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
   2864 		vmx_eptp_type = EPTP_TYPE_WB;
   2865 	} else {
   2866 		vmx_eptp_type = EPTP_TYPE_UC;
   2867 	}
   2868 
   2869 	/* Init the L1TF mitigation. */
   2870 	vmx_init_l1tf();
   2871 
   2872 	memset(vmxoncpu, 0, sizeof(vmxoncpu));
   2873 	revision = vmx_get_revision();
   2874 
   2875 	for (CPU_INFO_FOREACH(cii, ci)) {
   2876 		error = vmx_memalloc(&pa, &va, 1);
   2877 		if (error) {
   2878 			panic("%s: out of memory", __func__);
   2879 		}
   2880 		vmxoncpu[cpu_index(ci)].pa = pa;
   2881 		vmxoncpu[cpu_index(ci)].va = va;
   2882 
   2883 		vmxon = (struct vmxon *)vmxoncpu[cpu_index(ci)].va;
   2884 		vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
   2885 	}
   2886 
   2887 	xc = xc_broadcast(0, vmx_change_cpu, (void *)true, NULL);
   2888 	xc_wait(xc);
   2889 }
   2890 
   2891 static void
   2892 vmx_fini_asid(void)
   2893 {
   2894 	size_t allocsz;
   2895 
   2896 	allocsz = roundup(vmx_maxasid, 8) / 8;
   2897 	kmem_free(vmx_asidmap, allocsz);
   2898 
   2899 	mutex_destroy(&vmx_asidlock);
   2900 }
   2901 
   2902 static void
   2903 vmx_fini(void)
   2904 {
   2905 	uint64_t xc;
   2906 	size_t i;
   2907 
   2908 	xc = xc_broadcast(0, vmx_change_cpu, (void *)false, NULL);
   2909 	xc_wait(xc);
   2910 
   2911 	for (i = 0; i < MAXCPUS; i++) {
   2912 		if (vmxoncpu[i].pa != 0)
   2913 			vmx_memfree(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
   2914 	}
   2915 
   2916 	vmx_fini_asid();
   2917 }
   2918 
   2919 static void
   2920 vmx_capability(struct nvmm_capability *cap)
   2921 {
   2922 	cap->u.x86.xcr0_mask = vmx_xcr0_mask;
   2923 	cap->u.x86.mxcsr_mask = x86_fpu_mxcsr_mask;
   2924 	cap->u.x86.conf_cpuid_maxops = VMX_NCPUIDS;
   2925 }
   2926 
   2927 const struct nvmm_impl nvmm_x86_vmx = {
   2928 	.ident = vmx_ident,
   2929 	.init = vmx_init,
   2930 	.fini = vmx_fini,
   2931 	.capability = vmx_capability,
   2932 	.conf_max = NVMM_X86_NCONF,
   2933 	.conf_sizes = vmx_conf_sizes,
   2934 	.state_size = sizeof(struct nvmm_x64_state),
   2935 	.machine_create = vmx_machine_create,
   2936 	.machine_destroy = vmx_machine_destroy,
   2937 	.machine_configure = vmx_machine_configure,
   2938 	.vcpu_create = vmx_vcpu_create,
   2939 	.vcpu_destroy = vmx_vcpu_destroy,
   2940 	.vcpu_setstate = vmx_vcpu_setstate,
   2941 	.vcpu_getstate = vmx_vcpu_getstate,
   2942 	.vcpu_inject = vmx_vcpu_inject,
   2943 	.vcpu_run = vmx_vcpu_run
   2944 };
   2945