nvmm_x86_svm.c revision 1.51
      1 /*	$NetBSD: nvmm_x86_svm.c,v 1.51 2019/10/23 07:01:11 maxv Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Maxime Villard.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.51 2019/10/23 07:01:11 maxv Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/kernel.h>
     38 #include <sys/kmem.h>
     39 #include <sys/cpu.h>
     40 #include <sys/xcall.h>
     41 #include <sys/mman.h>
     42 
     43 #include <uvm/uvm.h>
     44 #include <uvm/uvm_page.h>
     45 
     46 #include <x86/cputypes.h>
     47 #include <x86/specialreg.h>
     48 #include <x86/pmap.h>
     49 #include <x86/dbregs.h>
     50 #include <x86/cpu_counter.h>
     51 #include <machine/cpuvar.h>
     52 
     53 #include <dev/nvmm/nvmm.h>
     54 #include <dev/nvmm/nvmm_internal.h>
     55 #include <dev/nvmm/x86/nvmm_x86.h>
     56 
     57 int svm_vmrun(paddr_t, uint64_t *);
     58 
     59 #define	MSR_VM_HSAVE_PA	0xC0010117
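/*
 * Before the first VMRUN on a CPU, VM_HSAVE_PA must point to a page-aligned
 * host save area, where the hardware spills part of the host state across
 * VMRUN/#VMEXIT. A minimal per-CPU setup sketch (the actual enable path is
 * outside this excerpt; see the hsave[] array further down):
 *
 *	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
 *	wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
 */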
     60 
     61 /* -------------------------------------------------------------------------- */
     62 
     63 #define VMCB_EXITCODE_CR0_READ		0x0000
     64 #define VMCB_EXITCODE_CR1_READ		0x0001
     65 #define VMCB_EXITCODE_CR2_READ		0x0002
     66 #define VMCB_EXITCODE_CR3_READ		0x0003
     67 #define VMCB_EXITCODE_CR4_READ		0x0004
     68 #define VMCB_EXITCODE_CR5_READ		0x0005
     69 #define VMCB_EXITCODE_CR6_READ		0x0006
     70 #define VMCB_EXITCODE_CR7_READ		0x0007
     71 #define VMCB_EXITCODE_CR8_READ		0x0008
     72 #define VMCB_EXITCODE_CR9_READ		0x0009
     73 #define VMCB_EXITCODE_CR10_READ		0x000A
     74 #define VMCB_EXITCODE_CR11_READ		0x000B
     75 #define VMCB_EXITCODE_CR12_READ		0x000C
     76 #define VMCB_EXITCODE_CR13_READ		0x000D
     77 #define VMCB_EXITCODE_CR14_READ		0x000E
     78 #define VMCB_EXITCODE_CR15_READ		0x000F
     79 #define VMCB_EXITCODE_CR0_WRITE		0x0010
     80 #define VMCB_EXITCODE_CR1_WRITE		0x0011
     81 #define VMCB_EXITCODE_CR2_WRITE		0x0012
     82 #define VMCB_EXITCODE_CR3_WRITE		0x0013
     83 #define VMCB_EXITCODE_CR4_WRITE		0x0014
     84 #define VMCB_EXITCODE_CR5_WRITE		0x0015
     85 #define VMCB_EXITCODE_CR6_WRITE		0x0016
     86 #define VMCB_EXITCODE_CR7_WRITE		0x0017
     87 #define VMCB_EXITCODE_CR8_WRITE		0x0018
     88 #define VMCB_EXITCODE_CR9_WRITE		0x0019
     89 #define VMCB_EXITCODE_CR10_WRITE	0x001A
     90 #define VMCB_EXITCODE_CR11_WRITE	0x001B
     91 #define VMCB_EXITCODE_CR12_WRITE	0x001C
     92 #define VMCB_EXITCODE_CR13_WRITE	0x001D
     93 #define VMCB_EXITCODE_CR14_WRITE	0x001E
     94 #define VMCB_EXITCODE_CR15_WRITE	0x001F
     95 #define VMCB_EXITCODE_DR0_READ		0x0020
     96 #define VMCB_EXITCODE_DR1_READ		0x0021
     97 #define VMCB_EXITCODE_DR2_READ		0x0022
     98 #define VMCB_EXITCODE_DR3_READ		0x0023
     99 #define VMCB_EXITCODE_DR4_READ		0x0024
    100 #define VMCB_EXITCODE_DR5_READ		0x0025
    101 #define VMCB_EXITCODE_DR6_READ		0x0026
    102 #define VMCB_EXITCODE_DR7_READ		0x0027
    103 #define VMCB_EXITCODE_DR8_READ		0x0028
    104 #define VMCB_EXITCODE_DR9_READ		0x0029
    105 #define VMCB_EXITCODE_DR10_READ		0x002A
    106 #define VMCB_EXITCODE_DR11_READ		0x002B
    107 #define VMCB_EXITCODE_DR12_READ		0x002C
    108 #define VMCB_EXITCODE_DR13_READ		0x002D
    109 #define VMCB_EXITCODE_DR14_READ		0x002E
    110 #define VMCB_EXITCODE_DR15_READ		0x002F
    111 #define VMCB_EXITCODE_DR0_WRITE		0x0030
    112 #define VMCB_EXITCODE_DR1_WRITE		0x0031
    113 #define VMCB_EXITCODE_DR2_WRITE		0x0032
    114 #define VMCB_EXITCODE_DR3_WRITE		0x0033
    115 #define VMCB_EXITCODE_DR4_WRITE		0x0034
    116 #define VMCB_EXITCODE_DR5_WRITE		0x0035
    117 #define VMCB_EXITCODE_DR6_WRITE		0x0036
    118 #define VMCB_EXITCODE_DR7_WRITE		0x0037
    119 #define VMCB_EXITCODE_DR8_WRITE		0x0038
    120 #define VMCB_EXITCODE_DR9_WRITE		0x0039
    121 #define VMCB_EXITCODE_DR10_WRITE	0x003A
    122 #define VMCB_EXITCODE_DR11_WRITE	0x003B
    123 #define VMCB_EXITCODE_DR12_WRITE	0x003C
    124 #define VMCB_EXITCODE_DR13_WRITE	0x003D
    125 #define VMCB_EXITCODE_DR14_WRITE	0x003E
    126 #define VMCB_EXITCODE_DR15_WRITE	0x003F
    127 #define VMCB_EXITCODE_EXCP0		0x0040
    128 #define VMCB_EXITCODE_EXCP1		0x0041
    129 #define VMCB_EXITCODE_EXCP2		0x0042
    130 #define VMCB_EXITCODE_EXCP3		0x0043
    131 #define VMCB_EXITCODE_EXCP4		0x0044
    132 #define VMCB_EXITCODE_EXCP5		0x0045
    133 #define VMCB_EXITCODE_EXCP6		0x0046
    134 #define VMCB_EXITCODE_EXCP7		0x0047
    135 #define VMCB_EXITCODE_EXCP8		0x0048
    136 #define VMCB_EXITCODE_EXCP9		0x0049
    137 #define VMCB_EXITCODE_EXCP10		0x004A
    138 #define VMCB_EXITCODE_EXCP11		0x004B
    139 #define VMCB_EXITCODE_EXCP12		0x004C
    140 #define VMCB_EXITCODE_EXCP13		0x004D
    141 #define VMCB_EXITCODE_EXCP14		0x004E
    142 #define VMCB_EXITCODE_EXCP15		0x004F
    143 #define VMCB_EXITCODE_EXCP16		0x0050
    144 #define VMCB_EXITCODE_EXCP17		0x0051
    145 #define VMCB_EXITCODE_EXCP18		0x0052
    146 #define VMCB_EXITCODE_EXCP19		0x0053
    147 #define VMCB_EXITCODE_EXCP20		0x0054
    148 #define VMCB_EXITCODE_EXCP21		0x0055
    149 #define VMCB_EXITCODE_EXCP22		0x0056
    150 #define VMCB_EXITCODE_EXCP23		0x0057
    151 #define VMCB_EXITCODE_EXCP24		0x0058
    152 #define VMCB_EXITCODE_EXCP25		0x0059
    153 #define VMCB_EXITCODE_EXCP26		0x005A
    154 #define VMCB_EXITCODE_EXCP27		0x005B
    155 #define VMCB_EXITCODE_EXCP28		0x005C
    156 #define VMCB_EXITCODE_EXCP29		0x005D
    157 #define VMCB_EXITCODE_EXCP30		0x005E
    158 #define VMCB_EXITCODE_EXCP31		0x005F
    159 #define VMCB_EXITCODE_INTR		0x0060
    160 #define VMCB_EXITCODE_NMI		0x0061
    161 #define VMCB_EXITCODE_SMI		0x0062
    162 #define VMCB_EXITCODE_INIT		0x0063
    163 #define VMCB_EXITCODE_VINTR		0x0064
    164 #define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
    165 #define VMCB_EXITCODE_IDTR_READ		0x0066
    166 #define VMCB_EXITCODE_GDTR_READ		0x0067
    167 #define VMCB_EXITCODE_LDTR_READ		0x0068
    168 #define VMCB_EXITCODE_TR_READ		0x0069
    169 #define VMCB_EXITCODE_IDTR_WRITE	0x006A
    170 #define VMCB_EXITCODE_GDTR_WRITE	0x006B
    171 #define VMCB_EXITCODE_LDTR_WRITE	0x006C
    172 #define VMCB_EXITCODE_TR_WRITE		0x006D
    173 #define VMCB_EXITCODE_RDTSC		0x006E
    174 #define VMCB_EXITCODE_RDPMC		0x006F
    175 #define VMCB_EXITCODE_PUSHF		0x0070
    176 #define VMCB_EXITCODE_POPF		0x0071
    177 #define VMCB_EXITCODE_CPUID		0x0072
    178 #define VMCB_EXITCODE_RSM		0x0073
    179 #define VMCB_EXITCODE_IRET		0x0074
    180 #define VMCB_EXITCODE_SWINT		0x0075
    181 #define VMCB_EXITCODE_INVD		0x0076
    182 #define VMCB_EXITCODE_PAUSE		0x0077
    183 #define VMCB_EXITCODE_HLT		0x0078
    184 #define VMCB_EXITCODE_INVLPG		0x0079
    185 #define VMCB_EXITCODE_INVLPGA		0x007A
    186 #define VMCB_EXITCODE_IOIO		0x007B
    187 #define VMCB_EXITCODE_MSR		0x007C
    188 #define VMCB_EXITCODE_TASK_SWITCH	0x007D
    189 #define VMCB_EXITCODE_FERR_FREEZE	0x007E
    190 #define VMCB_EXITCODE_SHUTDOWN		0x007F
    191 #define VMCB_EXITCODE_VMRUN		0x0080
    192 #define VMCB_EXITCODE_VMMCALL		0x0081
    193 #define VMCB_EXITCODE_VMLOAD		0x0082
    194 #define VMCB_EXITCODE_VMSAVE		0x0083
    195 #define VMCB_EXITCODE_STGI		0x0084
    196 #define VMCB_EXITCODE_CLGI		0x0085
    197 #define VMCB_EXITCODE_SKINIT		0x0086
    198 #define VMCB_EXITCODE_RDTSCP		0x0087
    199 #define VMCB_EXITCODE_ICEBP		0x0088
    200 #define VMCB_EXITCODE_WBINVD		0x0089
    201 #define VMCB_EXITCODE_MONITOR		0x008A
    202 #define VMCB_EXITCODE_MWAIT		0x008B
    203 #define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
    204 #define VMCB_EXITCODE_XSETBV		0x008D
    205 #define VMCB_EXITCODE_RDPRU		0x008E
    206 #define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
    207 #define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
    208 #define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
    209 #define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
    210 #define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
    211 #define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
    212 #define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
    213 #define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
    214 #define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
    215 #define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
    216 #define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
    217 #define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
    218 #define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
    219 #define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
    220 #define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
    221 #define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
    222 #define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
    223 #define VMCB_EXITCODE_MCOMMIT		0x00A3
    224 #define VMCB_EXITCODE_NPF		0x0400
    225 #define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
    226 #define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
    227 #define VMCB_EXITCODE_VMGEXIT		0x0403
    228 #define VMCB_EXITCODE_INVALID		-1
    229 
    230 /* -------------------------------------------------------------------------- */
    231 
    232 struct vmcb_ctrl {
    233 	uint32_t intercept_cr;
    234 #define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
    235 #define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)
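/*
 * Illustrative: the low 16 bits of intercept_cr select CR reads to
 * intercept, the high 16 bits CR writes. Intercepting guest writes to
 * CR4, for instance, sets __BIT(20):
 *
 *	ctrl->intercept_cr |= VMCB_CTRL_INTERCEPT_WCR(4);
 */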
    236 
    237 	uint32_t intercept_dr;
    238 #define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
    239 #define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)
    240 
    241 	uint32_t intercept_vec;
    242 #define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)
    243 
    244 	uint32_t intercept_misc1;
    245 #define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
    246 #define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
    247 #define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
    248 #define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
    249 #define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
    250 #define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
    251 #define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
    252 #define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
    253 #define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
    254 #define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
    255 #define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
    256 #define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
    257 #define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
    258 #define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
    259 #define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
    260 #define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
    261 #define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
    262 #define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
    263 #define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
    264 #define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
    265 #define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
    266 #define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
    267 #define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
    268 #define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
    269 #define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
    270 #define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
    271 #define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
    272 #define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
    273 #define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
    274 #define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
    275 #define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
    276 #define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)
    277 
    278 	uint32_t intercept_misc2;
    279 #define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
    280 #define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
    281 #define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
    282 #define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
    283 #define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
    284 #define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
    285 #define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
    286 #define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
    287 #define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
    288 #define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
    289 #define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
    290 #define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(11)
    291 #define VMCB_CTRL_INTERCEPT_MWAIT_ARMED	__BIT(12)
    292 #define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
    293 #define VMCB_CTRL_INTERCEPT_RDPRU	__BIT(14)
    294 #define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
    295 #define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)
    296 
    297 	uint32_t intercept_misc3;
    298 #define VMCB_CTRL_INTERCEPT_MCOMMIT	__BIT(3)
    299 
    300 	uint8_t  rsvd1[36];
    301 	uint16_t pause_filt_thresh;
    302 	uint16_t pause_filt_cnt;
    303 	uint64_t iopm_base_pa;
    304 	uint64_t msrpm_base_pa;
    305 	uint64_t tsc_offset;
    306 	uint32_t guest_asid;
    307 
    308 	uint32_t tlb_ctrl;
    309 #define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
    310 #define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
    311 #define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07
    312 
    313 	uint64_t v;
    314 #define VMCB_CTRL_V_TPR			__BITS(3,0)
    315 #define VMCB_CTRL_V_IRQ			__BIT(8)
    316 #define VMCB_CTRL_V_VGIF		__BIT(9)
    317 #define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
    318 #define VMCB_CTRL_V_IGN_TPR		__BIT(20)
    319 #define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
    320 #define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
    321 #define VMCB_CTRL_V_AVIC_EN		__BIT(31)
    322 #define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)
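/*
 * The "v" word drives virtual-interrupt handling: V_TPR mirrors the guest
 * CR8/TPR, V_IRQ requests injection of the virtual interrupt held in
 * V_INTR_VECTOR, and V_IGN_TPR delivers it regardless of the priority in
 * V_TPR. svm_event_waitexit_enable() below uses V_IRQ|V_IGN_TPR purely as
 * an interrupt-window trigger, with the VINTR intercept catching the
 * moment the guest becomes interruptible.
 */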
    323 
    324 	uint64_t intr;
    325 #define VMCB_CTRL_INTR_SHADOW		__BIT(0)
    326 
    327 	uint64_t exitcode;
    328 	uint64_t exitinfo1;
    329 	uint64_t exitinfo2;
    330 
    331 	uint64_t exitintinfo;
    332 #define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
    333 #define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
    334 #define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
    335 #define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
    336 #define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)
    337 
    338 	uint64_t enable1;
    339 #define VMCB_CTRL_ENABLE_NP		__BIT(0)
    340 #define VMCB_CTRL_ENABLE_SEV		__BIT(1)
    341 #define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)
    342 #define VMCB_CTRL_ENABLE_GMET		__BIT(3)
    343 #define VMCB_CTRL_ENABLE_VTE		__BIT(5)
    344 
    345 	uint64_t avic;
    346 #define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)
    347 
    348 	uint64_t ghcb;
    349 
    350 	uint64_t eventinj;
    351 #define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
    352 #define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
    353 #define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
    354 #define VMCB_CTRL_EVENTINJ_V		__BIT(31)
    355 #define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)
    356 
    357 	uint64_t n_cr3;
    358 
    359 	uint64_t enable2;
    360 #define VMCB_CTRL_ENABLE_LBR		__BIT(0)
    361 #define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)
    362 
    363 	uint32_t vmcb_clean;
    364 #define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
    365 #define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
    366 #define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
    367 #define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
    368 #define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
    369 #define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
    370 #define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
    371 #define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
    372 #define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
    373 #define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
    374 #define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
    375 #define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)
    376 
    377 	uint32_t rsvd2;
    378 	uint64_t nrip;
    379 	uint8_t	inst_len;
    380 	uint8_t	inst_bytes[15];
    381 	uint64_t avic_abpp;
    382 	uint64_t rsvd3;
    383 	uint64_t avic_ltp;
    384 
    385 	uint64_t avic_phys;
    386 #define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
    387 #define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)
    388 
    389 	uint64_t rsvd4;
    390 	uint64_t vmcb_ptr;
    391 
    392 	uint8_t	pad[752];
    393 } __packed;
    394 
    395 CTASSERT(sizeof(struct vmcb_ctrl) == 1024);
    396 
    397 struct vmcb_segment {
    398 	uint16_t selector;
    399 	uint16_t attrib;	/* hidden */
    400 	uint32_t limit;		/* hidden */
    401 	uint64_t base;		/* hidden */
    402 } __packed;
    403 
    404 CTASSERT(sizeof(struct vmcb_segment) == 16);
    405 
    406 struct vmcb_state {
    407 	struct   vmcb_segment es;
    408 	struct   vmcb_segment cs;
    409 	struct   vmcb_segment ss;
    410 	struct   vmcb_segment ds;
    411 	struct   vmcb_segment fs;
    412 	struct   vmcb_segment gs;
    413 	struct   vmcb_segment gdt;
    414 	struct   vmcb_segment ldt;
    415 	struct   vmcb_segment idt;
    416 	struct   vmcb_segment tr;
    417 	uint8_t	 rsvd1[43];
    418 	uint8_t	 cpl;
    419 	uint8_t  rsvd2[4];
    420 	uint64_t efer;
    421 	uint8_t	 rsvd3[112];
    422 	uint64_t cr4;
    423 	uint64_t cr3;
    424 	uint64_t cr0;
    425 	uint64_t dr7;
    426 	uint64_t dr6;
    427 	uint64_t rflags;
    428 	uint64_t rip;
    429 	uint8_t	 rsvd4[88];
    430 	uint64_t rsp;
    431 	uint8_t	 rsvd5[24];
    432 	uint64_t rax;
    433 	uint64_t star;
    434 	uint64_t lstar;
    435 	uint64_t cstar;
    436 	uint64_t sfmask;
    437 	uint64_t kernelgsbase;
    438 	uint64_t sysenter_cs;
    439 	uint64_t sysenter_esp;
    440 	uint64_t sysenter_eip;
    441 	uint64_t cr2;
    442 	uint8_t	 rsvd6[32];
    443 	uint64_t g_pat;
    444 	uint64_t dbgctl;
    445 	uint64_t br_from;
    446 	uint64_t br_to;
    447 	uint64_t int_from;
    448 	uint64_t int_to;
    449 	uint8_t	 pad[2408];
    450 } __packed;
    451 
    452 CTASSERT(sizeof(struct vmcb_state) == 0xC00);
    453 
    454 struct vmcb {
    455 	struct vmcb_ctrl ctrl;
    456 	struct vmcb_state state;
    457 } __packed;
    458 
    459 CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
    460 CTASSERT(offsetof(struct vmcb, state) == 0x400);
    461 
    462 /* -------------------------------------------------------------------------- */
    463 
    464 static void svm_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
    465 static void svm_vcpu_state_commit(struct nvmm_cpu *);
    466 
    467 struct svm_hsave {
    468 	paddr_t pa;
    469 };
    470 
    471 static struct svm_hsave hsave[MAXCPUS];
    472 
    473 static uint8_t *svm_asidmap __read_mostly;
    474 static uint32_t svm_maxasid __read_mostly;
    475 static kmutex_t svm_asidlock __cacheline_aligned;
    476 
    477 static bool svm_decode_assist __read_mostly;
    478 static uint32_t svm_ctrl_tlb_flush __read_mostly;
    479 
    480 #define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
    481 static uint64_t svm_xcr0_mask __read_mostly;
    482 
    483 #define SVM_NCPUIDS	32
    484 
    485 #define VMCB_NPAGES	1
    486 
    487 #define MSRBM_NPAGES	2
    488 #define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)
    489 
    490 #define IOBM_NPAGES	3
    491 #define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)
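/*
 * Sizing: the AMD I/O permission map covers the 64K ports at one bit per
 * port (8KB) plus a spill-over area for multi-byte accesses wrapping past
 * port 0xFFFF, hence three pages. The MSR permission map encodes three
 * 8192-MSR ranges at two bits (read/write) per MSR, 3 x 2KB = 6KB, which
 * fits in the two pages allocated here; see svm_vcpu_msr_allow() for the
 * per-range offsets.
 */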
    492 
    493 /* Does not include EFER_LMSLE. */
    494 #define EFER_VALID \
    495 	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)
    496 
    497 #define EFER_TLB_FLUSH \
    498 	(EFER_NXE|EFER_LMA|EFER_LME)
    499 #define CR0_TLB_FLUSH \
    500 	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
    501 #define CR4_TLB_FLUSH \
    502 	(CR4_PGE|CR4_PAE|CR4_PSE)
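/*
 * Bits whose modification changes how guest virtual addresses translate
 * (paging mode, global pages, large pages, write protection, cacheability),
 * so any transition in them must invalidate the guest's TLB entries.
 * Example: a guest toggling CR0_PG switches paging on or off, and stale
 * translations must not survive that. svm_state_tlb_flush() below compares
 * old and new state against these masks.
 */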
    503 
    504 /* -------------------------------------------------------------------------- */
    505 
    506 struct svm_machdata {
    507 	volatile uint64_t mach_htlb_gen;
    508 };
    509 
    510 static const size_t svm_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
    511 	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
    512 	    sizeof(struct nvmm_vcpu_conf_cpuid)
    513 };
    514 
    515 struct svm_cpudata {
    516 	/* General */
    517 	bool shared_asid;
    518 	bool gtlb_want_flush;
    519 	bool gtsc_want_update;
    520 	uint64_t vcpu_htlb_gen;
    521 
    522 	/* VMCB */
    523 	struct vmcb *vmcb;
    524 	paddr_t vmcb_pa;
    525 
    526 	/* I/O bitmap */
    527 	uint8_t *iobm;
    528 	paddr_t iobm_pa;
    529 
    530 	/* MSR bitmap */
    531 	uint8_t *msrbm;
    532 	paddr_t msrbm_pa;
    533 
    534 	/* Host state */
    535 	uint64_t hxcr0;
    536 	uint64_t star;
    537 	uint64_t lstar;
    538 	uint64_t cstar;
    539 	uint64_t sfmask;
    540 	uint64_t fsbase;
    541 	uint64_t kernelgsbase;
    542 
    543 	/* Intr state */
    544 	bool int_window_exit;
    545 	bool nmi_window_exit;
    546 	bool evt_pending;
    547 
    548 	/* Guest state */
    549 	uint64_t gxcr0;
    550 	uint64_t gprs[NVMM_X64_NGPR];
    551 	uint64_t drs[NVMM_X64_NDR];
    552 	uint64_t gtsc;
    553 	struct xsave_header gfpu __aligned(64);
    554 
    555 	/* VCPU configuration. */
    556 	bool cpuidpresent[SVM_NCPUIDS];
    557 	struct nvmm_vcpu_conf_cpuid cpuid[SVM_NCPUIDS];
    558 };
    559 
    560 static void
    561 svm_vmcb_cache_default(struct vmcb *vmcb)
    562 {
    563 	vmcb->ctrl.vmcb_clean =
    564 	    VMCB_CTRL_VMCB_CLEAN_I |
    565 	    VMCB_CTRL_VMCB_CLEAN_IOPM |
    566 	    VMCB_CTRL_VMCB_CLEAN_ASID |
    567 	    VMCB_CTRL_VMCB_CLEAN_TPR |
    568 	    VMCB_CTRL_VMCB_CLEAN_NP |
    569 	    VMCB_CTRL_VMCB_CLEAN_CR |
    570 	    VMCB_CTRL_VMCB_CLEAN_DR |
    571 	    VMCB_CTRL_VMCB_CLEAN_DT |
    572 	    VMCB_CTRL_VMCB_CLEAN_SEG |
    573 	    VMCB_CTRL_VMCB_CLEAN_CR2 |
    574 	    VMCB_CTRL_VMCB_CLEAN_LBR |
    575 	    VMCB_CTRL_VMCB_CLEAN_AVIC;
    576 }
    577 
    578 static void
    579 svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
    580 {
    581 	if (flags & NVMM_X64_STATE_SEGS) {
    582 		vmcb->ctrl.vmcb_clean &=
    583 		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
    584 	}
    585 	if (flags & NVMM_X64_STATE_CRS) {
    586 		vmcb->ctrl.vmcb_clean &=
    587 		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
    588 		      VMCB_CTRL_VMCB_CLEAN_TPR);
    589 	}
    590 	if (flags & NVMM_X64_STATE_DRS) {
    591 		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
    592 	}
    593 	if (flags & NVMM_X64_STATE_MSRS) {
    594 		/* CR for EFER, NP for PAT. */
    595 		vmcb->ctrl.vmcb_clean &=
    596 		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
    597 	}
    598 }
    599 
    600 static inline void
    601 svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
    602 {
    603 	vmcb->ctrl.vmcb_clean &= ~flags;
    604 }
    605 
    606 static inline void
    607 svm_vmcb_cache_flush_all(struct vmcb *vmcb)
    608 {
    609 	vmcb->ctrl.vmcb_clean = 0;
    610 }
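/*
 * Clean-bits protocol, in short: after VMRUN returns, svm_vmcb_cache_default()
 * marks every cached guest-state group clean; any later software write into
 * the VMCB must clear the matching bit so the hardware reloads that group on
 * the next VMRUN. Typical pattern, as used throughout this file:
 *
 *	vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
 *	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
 */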
    611 
    612 #define SVM_EVENT_TYPE_HW_INT	0
    613 #define SVM_EVENT_TYPE_NMI	2
    614 #define SVM_EVENT_TYPE_EXC	3
    615 #define SVM_EVENT_TYPE_SW_INT	4
    616 
    617 static void
    618 svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
    619 {
    620 	struct svm_cpudata *cpudata = vcpu->cpudata;
    621 	struct vmcb *vmcb = cpudata->vmcb;
    622 
    623 	if (nmi) {
    624 		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
    625 		cpudata->nmi_window_exit = true;
    626 	} else {
    627 		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
    628 		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
    629 		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
    630 		cpudata->int_window_exit = true;
    631 	}
    632 
    633 	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
    634 }
    635 
    636 static void
    637 svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
    638 {
    639 	struct svm_cpudata *cpudata = vcpu->cpudata;
    640 	struct vmcb *vmcb = cpudata->vmcb;
    641 
    642 	if (nmi) {
    643 		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
    644 		cpudata->nmi_window_exit = false;
    645 	} else {
    646 		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
    647 		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
    648 		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
    649 		cpudata->int_window_exit = false;
    650 	}
    651 
    652 	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
    653 }
    654 
    655 static inline int
    656 svm_event_has_error(uint8_t vector)
    657 {
    658 	switch (vector) {
    659 	case 8:		/* #DF */
    660 	case 10:	/* #TS */
    661 	case 11:	/* #NP */
    662 	case 12:	/* #SS */
    663 	case 13:	/* #GP */
    664 	case 14:	/* #PF */
    665 	case 17:	/* #AC */
    666 	case 30:	/* #SX */
    667 		return 1;
    668 	default:
    669 		return 0;
    670 	}
    671 }
    672 
    673 static int
    674 svm_vcpu_inject(struct nvmm_cpu *vcpu)
    675 {
    676 	struct nvmm_comm_page *comm = vcpu->comm;
    677 	struct svm_cpudata *cpudata = vcpu->cpudata;
    678 	struct vmcb *vmcb = cpudata->vmcb;
    679 	u_int evtype;
    680 	uint8_t vector;
    681 	uint64_t error;
    682 	int type = 0, err = 0;
    683 
    684 	evtype = comm->event.type;
    685 	vector = comm->event.vector;
    686 	error = comm->event.u.excp.error;
    687 	__insn_barrier();
    688 
    689 	switch (evtype) {
    690 	case NVMM_VCPU_EVENT_EXCP:
    691 		type = SVM_EVENT_TYPE_EXC;
    692 		if (vector == 2 || vector >= 32)
    693 			return EINVAL;
    694 		if (vector == 3 || vector == 0)
    695 			return EINVAL;
    696 		err = svm_event_has_error(vector);
    697 		break;
    698 	case NVMM_VCPU_EVENT_INTR:
    699 		type = SVM_EVENT_TYPE_HW_INT;
    700 		if (vector == 2) {
    701 			type = SVM_EVENT_TYPE_NMI;
    702 			svm_event_waitexit_enable(vcpu, true);
    703 		}
    704 		err = 0;
    705 		break;
    706 	default:
    707 		return EINVAL;
    708 	}
    709 
    710 	vmcb->ctrl.eventinj =
    711 	    __SHIFTIN((uint64_t)vector, VMCB_CTRL_EVENTINJ_VECTOR) |
    712 	    __SHIFTIN((uint64_t)type, VMCB_CTRL_EVENTINJ_TYPE) |
    713 	    __SHIFTIN((uint64_t)err, VMCB_CTRL_EVENTINJ_EV) |
    714 	    __SHIFTIN((uint64_t)1, VMCB_CTRL_EVENTINJ_V) |
    715 	    __SHIFTIN((uint64_t)error, VMCB_CTRL_EVENTINJ_ERRORCODE);
    716 
    717 	cpudata->evt_pending = true;
    718 
    719 	return 0;
    720 }
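/*
 * Illustrative flow: events normally arrive from userland through the comm
 * page -- the caller fills comm->event and sets comm->event_commit, and the
 * next run of the VCPU turns it into an EVENTINJ descriptor here via
 * svm_vcpu_event_commit(). The in-kernel injectors below (#UD, #GP) reuse
 * the same path directly.
 */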
    721 
    722 static void
    723 svm_inject_ud(struct nvmm_cpu *vcpu)
    724 {
    725 	struct nvmm_comm_page *comm = vcpu->comm;
    726 	int ret __diagused;
    727 
    728 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
    729 	comm->event.vector = 6;
    730 	comm->event.u.excp.error = 0;
    731 
    732 	ret = svm_vcpu_inject(vcpu);
    733 	KASSERT(ret == 0);
    734 }
    735 
    736 static void
    737 svm_inject_gp(struct nvmm_cpu *vcpu)
    738 {
    739 	struct nvmm_comm_page *comm = vcpu->comm;
    740 	int ret __diagused;
    741 
    742 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
    743 	comm->event.vector = 13;
    744 	comm->event.u.excp.error = 0;
    745 
    746 	ret = svm_vcpu_inject(vcpu);
    747 	KASSERT(ret == 0);
    748 }
    749 
    750 static inline int
    751 svm_vcpu_event_commit(struct nvmm_cpu *vcpu)
    752 {
    753 	if (__predict_true(!vcpu->comm->event_commit)) {
    754 		return 0;
    755 	}
    756 	vcpu->comm->event_commit = false;
    757 	return svm_vcpu_inject(vcpu);
    758 }
    759 
    760 static inline void
    761 svm_inkernel_advance(struct vmcb *vmcb)
    762 {
     763 	/*
     764 	 * Maybe we should also re-inject single-step (#DB) exceptions
     765 	 * here; this matters for guest ring-3 code, which can execute
     766 	 * 'cpuid' under a debugger.
     767 	 */
    768 	vmcb->state.rip = vmcb->ctrl.nrip;
    769 	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
    770 }
    771 
    772 static void
    773 svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
    774 {
    775 	struct svm_cpudata *cpudata = vcpu->cpudata;
    776 	uint64_t cr4;
    777 
    778 	switch (eax) {
    779 	case 0x00000001:
    780 		cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;
    781 
    782 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
    783 		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
    784 		    CPUID_LOCAL_APIC_ID);
    785 
    786 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
    787 		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
    788 
    789 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;
    790 
    791 		/* CPUID2_OSXSAVE depends on CR4. */
    792 		cr4 = cpudata->vmcb->state.cr4;
    793 		if (!(cr4 & CR4_OSXSAVE)) {
    794 			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
    795 		}
    796 		break;
    797 	case 0x00000005:
    798 	case 0x00000006:
    799 		cpudata->vmcb->state.rax = 0;
    800 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    801 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    802 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    803 		break;
    804 	case 0x00000007:
    805 		cpudata->vmcb->state.rax &= nvmm_cpuid_00000007.eax;
    806 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
    807 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
    808 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
    809 		break;
    810 	case 0x0000000D:
    811 		if (svm_xcr0_mask == 0) {
    812 			break;
    813 		}
    814 		switch (ecx) {
    815 		case 0:
    816 			cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
    817 			if (cpudata->gxcr0 & XCR0_SSE) {
    818 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
    819 			} else {
    820 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
    821 			}
    822 			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
    823 			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
    824 			cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
    825 			break;
    826 		case 1:
    827 			cpudata->vmcb->state.rax &= ~CPUID_PES1_XSAVES;
    828 			break;
    829 		}
    830 		break;
    831 	case 0x40000000:
    832 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    833 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    834 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    835 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
    836 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
    837 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
    838 		break;
    839 	case 0x80000001:
    840 		cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax;
    841 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
    842 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
    843 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
    844 		break;
    845 	default:
    846 		break;
    847 	}
    848 }
    849 
    850 static void
    851 svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason)
    852 {
    853 	exit->u.insn.npc = vmcb->ctrl.nrip;
    854 	exit->reason = reason;
    855 }
    856 
    857 static void
    858 svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    859     struct nvmm_vcpu_exit *exit)
    860 {
    861 	struct svm_cpudata *cpudata = vcpu->cpudata;
    862 	struct nvmm_vcpu_conf_cpuid *cpuid;
    863 	uint64_t eax, ecx;
    864 	u_int descs[4];
    865 	size_t i;
    866 
    867 	eax = cpudata->vmcb->state.rax;
    868 	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
    869 	x86_cpuid2(eax, ecx, descs);
    870 
    871 	cpudata->vmcb->state.rax = descs[0];
    872 	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
    873 	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
    874 	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
    875 
    876 	svm_inkernel_handle_cpuid(vcpu, eax, ecx);
    877 
    878 	for (i = 0; i < SVM_NCPUIDS; i++) {
    879 		if (!cpudata->cpuidpresent[i]) {
    880 			continue;
    881 		}
    882 		cpuid = &cpudata->cpuid[i];
    883 		if (cpuid->leaf != eax) {
    884 			continue;
    885 		}
    886 
    887 		if (cpuid->exit) {
    888 			svm_exit_insn(cpudata->vmcb, exit, NVMM_VCPU_EXIT_CPUID);
    889 			return;
    890 		}
    891 		KASSERT(cpuid->mask);
    892 
    893 		/* del */
    894 		cpudata->vmcb->state.rax &= ~cpuid->u.mask.del.eax;
    895 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
    896 		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
    897 		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;
    898 
    899 		/* set */
    900 		cpudata->vmcb->state.rax |= cpuid->u.mask.set.eax;
    901 		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
    902 		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
    903 		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;
    904 
    905 		break;
    906 	}
    907 
    908 	svm_inkernel_advance(cpudata->vmcb);
    909 	exit->reason = NVMM_VCPU_EXIT_NONE;
    910 }
    911 
    912 static void
    913 svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    914     struct nvmm_vcpu_exit *exit)
    915 {
    916 	struct svm_cpudata *cpudata = vcpu->cpudata;
    917 	struct vmcb *vmcb = cpudata->vmcb;
    918 
    919 	if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) {
    920 		svm_event_waitexit_disable(vcpu, false);
    921 	}
    922 
    923 	svm_inkernel_advance(cpudata->vmcb);
    924 	exit->reason = NVMM_VCPU_EXIT_HALTED;
    925 }
    926 
    927 #define SVM_EXIT_IO_PORT	__BITS(31,16)
    928 #define SVM_EXIT_IO_SEG		__BITS(12,10)
    929 #define SVM_EXIT_IO_A64		__BIT(9)
    930 #define SVM_EXIT_IO_A32		__BIT(8)
    931 #define SVM_EXIT_IO_A16		__BIT(7)
    932 #define SVM_EXIT_IO_SZ32	__BIT(6)
    933 #define SVM_EXIT_IO_SZ16	__BIT(5)
    934 #define SVM_EXIT_IO_SZ8		__BIT(4)
    935 #define SVM_EXIT_IO_REP		__BIT(3)
    936 #define SVM_EXIT_IO_STR		__BIT(2)
    937 #define SVM_EXIT_IO_IN		__BIT(0)
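/*
 * Worked example (illustrative): a real-mode "in %dx,%al" on port 0x3F8
 * produces exitinfo1 = 0x03F80091 -- IN (bit 0), SZ8 (bit 4), A16 (bit 7),
 * port in bits 31:16. svm_exit_io() below unpacks that into in=true,
 * operand_size=1, address_size=2, port=0x3F8.
 */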
    938 
    939 static void
    940 svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    941     struct nvmm_vcpu_exit *exit)
    942 {
    943 	struct svm_cpudata *cpudata = vcpu->cpudata;
    944 	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
    945 	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;
    946 
    947 	exit->reason = NVMM_VCPU_EXIT_IO;
    948 
    949 	exit->u.io.in = (info & SVM_EXIT_IO_IN) != 0;
    950 	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);
    951 
    952 	if (svm_decode_assist) {
    953 		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
    954 		exit->u.io.seg = __SHIFTOUT(info, SVM_EXIT_IO_SEG);
    955 	} else {
    956 		exit->u.io.seg = -1;
    957 	}
    958 
    959 	if (info & SVM_EXIT_IO_A64) {
    960 		exit->u.io.address_size = 8;
    961 	} else if (info & SVM_EXIT_IO_A32) {
    962 		exit->u.io.address_size = 4;
    963 	} else if (info & SVM_EXIT_IO_A16) {
    964 		exit->u.io.address_size = 2;
    965 	}
    966 
    967 	if (info & SVM_EXIT_IO_SZ32) {
    968 		exit->u.io.operand_size = 4;
    969 	} else if (info & SVM_EXIT_IO_SZ16) {
    970 		exit->u.io.operand_size = 2;
    971 	} else if (info & SVM_EXIT_IO_SZ8) {
    972 		exit->u.io.operand_size = 1;
    973 	}
    974 
    975 	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
    976 	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
    977 	exit->u.io.npc = nextpc;
    978 
    979 	svm_vcpu_state_provide(vcpu,
    980 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
    981 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
    982 }
    983 
    984 static const uint64_t msr_ignore_list[] = {
    985 	0xc0010055, /* MSR_CMPHALT */
    986 	MSR_DE_CFG,
    987 	MSR_IC_CFG,
    988 	MSR_UCODE_AMD_PATCHLEVEL
    989 };
    990 
    991 static bool
    992 svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    993     struct nvmm_vcpu_exit *exit)
    994 {
    995 	struct svm_cpudata *cpudata = vcpu->cpudata;
    996 	struct vmcb *vmcb = cpudata->vmcb;
    997 	uint64_t val;
    998 	size_t i;
    999 
   1000 	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
   1001 		if (exit->u.rdmsr.msr == MSR_NB_CFG) {
   1002 			val = NB_CFG_INITAPICCPUIDLO;
   1003 			vmcb->state.rax = (val & 0xFFFFFFFF);
   1004 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1005 			goto handled;
   1006 		}
   1007 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1008 			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
   1009 				continue;
   1010 			val = 0;
   1011 			vmcb->state.rax = (val & 0xFFFFFFFF);
   1012 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1013 			goto handled;
   1014 		}
   1015 	} else {
   1016 		if (exit->u.wrmsr.msr == MSR_EFER) {
   1017 			if (__predict_false(exit->u.wrmsr.val & ~EFER_VALID)) {
   1018 				goto error;
   1019 			}
   1020 			if ((vmcb->state.efer ^ exit->u.wrmsr.val) &
   1021 			     EFER_TLB_FLUSH) {
   1022 				cpudata->gtlb_want_flush = true;
   1023 			}
   1024 			vmcb->state.efer = exit->u.wrmsr.val | EFER_SVME;
   1025 			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
   1026 			goto handled;
   1027 		}
   1028 		if (exit->u.wrmsr.msr == MSR_TSC) {
   1029 			cpudata->gtsc = exit->u.wrmsr.val;
   1030 			cpudata->gtsc_want_update = true;
   1031 			goto handled;
   1032 		}
   1033 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1034 			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
   1035 				continue;
   1036 			goto handled;
   1037 		}
   1038 	}
   1039 
   1040 	return false;
   1041 
   1042 handled:
   1043 	svm_inkernel_advance(cpudata->vmcb);
   1044 	return true;
   1045 
   1046 error:
   1047 	svm_inject_gp(vcpu);
   1048 	return true;
   1049 }
   1050 
   1051 static inline void
   1052 svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1053     struct nvmm_vcpu_exit *exit)
   1054 {
   1055 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1056 
   1057 	exit->reason = NVMM_VCPU_EXIT_RDMSR;
   1058 	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1059 	exit->u.rdmsr.npc = cpudata->vmcb->ctrl.nrip;
   1060 
   1061 	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
   1062 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1063 		return;
   1064 	}
   1065 
   1066 	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1067 }
   1068 
   1069 static inline void
   1070 svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1071     struct nvmm_vcpu_exit *exit)
   1072 {
   1073 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1074 	uint64_t rdx, rax;
   1075 
   1076 	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
   1077 	rax = cpudata->vmcb->state.rax;
   1078 
   1079 	exit->reason = NVMM_VCPU_EXIT_WRMSR;
   1080 	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1081 	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
   1082 	exit->u.wrmsr.npc = cpudata->vmcb->ctrl.nrip;
   1083 
   1084 	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
   1085 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1086 		return;
   1087 	}
   1088 
   1089 	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1090 }
   1091 
   1092 static void
   1093 svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1094     struct nvmm_vcpu_exit *exit)
   1095 {
   1096 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1097 	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
   1098 
   1099 	if (info == 0) {
   1100 		svm_exit_rdmsr(mach, vcpu, exit);
   1101 	} else {
   1102 		svm_exit_wrmsr(mach, vcpu, exit);
   1103 	}
   1104 }
   1105 
   1106 static void
   1107 svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1108     struct nvmm_vcpu_exit *exit)
   1109 {
   1110 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1111 	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;
   1112 
   1113 	exit->reason = NVMM_VCPU_EXIT_MEMORY;
   1114 	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
   1115 		exit->u.mem.prot = PROT_WRITE;
   1116 	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
   1117 		exit->u.mem.prot = PROT_EXEC;
   1118 	else
   1119 		exit->u.mem.prot = PROT_READ;
   1120 	exit->u.mem.gpa = gpa;
   1121 	exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
   1122 	memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
   1123 	    sizeof(exit->u.mem.inst_bytes));
   1124 
   1125 	svm_vcpu_state_provide(vcpu,
   1126 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1127 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1128 }
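/*
 * For NPF exits, EXITINFO1 carries a #PF-style error code (hence the PGEX_*
 * tests above) and EXITINFO2 the faulting guest-physical address. With
 * decode-assist, the hardware also hands over the faulting instruction
 * bytes, sparing the emulator in userland a guest-memory fetch.
 */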
   1129 
   1130 static void
   1131 svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1132     struct nvmm_vcpu_exit *exit)
   1133 {
   1134 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1135 	struct vmcb *vmcb = cpudata->vmcb;
   1136 	uint64_t val;
   1137 
   1138 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1139 
   1140 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
   1141 	    (vmcb->state.rax & 0xFFFFFFFF);
   1142 
   1143 	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
   1144 		goto error;
   1145 	} else if (__predict_false(vmcb->state.cpl != 0)) {
   1146 		goto error;
   1147 	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
   1148 		goto error;
   1149 	} else if (__predict_false((val & XCR0_X87) == 0)) {
   1150 		goto error;
   1151 	}
   1152 
   1153 	cpudata->gxcr0 = val;
   1154 	if (svm_xcr0_mask != 0) {
   1155 		wrxcr(0, cpudata->gxcr0);
   1156 	}
   1157 
   1158 	svm_inkernel_advance(cpudata->vmcb);
   1159 	return;
   1160 
   1161 error:
   1162 	svm_inject_gp(vcpu);
   1163 }
   1164 
   1165 static void
   1166 svm_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
   1167 {
   1168 	exit->u.inv.hwcode = code;
   1169 	exit->reason = NVMM_VCPU_EXIT_INVALID;
   1170 }
   1171 
   1172 /* -------------------------------------------------------------------------- */
   1173 
   1174 static void
   1175 svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
   1176 {
   1177 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1178 
   1179 	fpu_save();
   1180 	fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);
   1181 
   1182 	if (svm_xcr0_mask != 0) {
   1183 		cpudata->hxcr0 = rdxcr(0);
   1184 		wrxcr(0, cpudata->gxcr0);
   1185 	}
   1186 }
   1187 
   1188 static void
   1189 svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
   1190 {
   1191 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1192 
   1193 	if (svm_xcr0_mask != 0) {
   1194 		cpudata->gxcr0 = rdxcr(0);
   1195 		wrxcr(0, cpudata->hxcr0);
   1196 	}
   1197 
   1198 	fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
   1199 }
   1200 
   1201 static void
   1202 svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
   1203 {
   1204 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1205 
   1206 	x86_dbregs_save(curlwp);
   1207 
   1208 	ldr7(0);
   1209 
   1210 	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
   1211 	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
   1212 	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
   1213 	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
   1214 }
   1215 
   1216 static void
   1217 svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
   1218 {
   1219 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1220 
   1221 	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
   1222 	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
   1223 	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
   1224 	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
   1225 
   1226 	x86_dbregs_restore(curlwp);
   1227 }
   1228 
   1229 static void
   1230 svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
   1231 {
   1232 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1233 
   1234 	cpudata->fsbase = rdmsr(MSR_FSBASE);
   1235 	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
   1236 }
   1237 
   1238 static void
   1239 svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
   1240 {
   1241 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1242 
   1243 	wrmsr(MSR_STAR, cpudata->star);
   1244 	wrmsr(MSR_LSTAR, cpudata->lstar);
   1245 	wrmsr(MSR_CSTAR, cpudata->cstar);
   1246 	wrmsr(MSR_SFMASK, cpudata->sfmask);
   1247 	wrmsr(MSR_FSBASE, cpudata->fsbase);
   1248 	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
   1249 }
   1250 
   1251 /* -------------------------------------------------------------------------- */
   1252 
   1253 static inline void
   1254 svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1255 {
   1256 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1257 
   1258 	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
   1259 		cpudata->gtlb_want_flush = true;
   1260 	}
   1261 }
   1262 
   1263 static inline void
   1264 svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1265 {
   1266 	/*
   1267 	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
   1268 	 * executing on this hCPU and the hTLB already got flushed, or it
   1269 	 * was executing on another hCPU in which case the catchup is done
   1270 	 * in svm_gtlb_catchup().
   1271 	 */
   1272 }
   1273 
   1274 static inline uint64_t
   1275 svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
   1276 {
   1277 	struct vmcb *vmcb = cpudata->vmcb;
   1278 	uint64_t machgen;
   1279 
   1280 	machgen = machdata->mach_htlb_gen;
   1281 	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
   1282 		return machgen;
   1283 	}
   1284 
   1285 	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
   1286 	return machgen;
   1287 }
   1288 
   1289 static inline void
   1290 svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
   1291 {
   1292 	struct vmcb *vmcb = cpudata->vmcb;
   1293 
   1294 	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
   1295 		cpudata->vcpu_htlb_gen = machgen;
   1296 	}
   1297 }
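/*
 * Host TLB generation protocol: machdata->mach_htlb_gen is bumped whenever
 * the machine's nested page tables change. Before VMRUN, svm_htlb_flush()
 * compares it with the VCPU's last-seen generation and schedules a flush on
 * mismatch; the new generation is acknowledged only after a successful
 * VMRUN (exitcode != VMCB_EXITCODE_INVALID), so a failed entry retries the
 * flush instead of losing it.
 */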
   1298 
   1299 static inline void
   1300 svm_exit_evt(struct svm_cpudata *cpudata, struct vmcb *vmcb)
   1301 {
   1302 	cpudata->evt_pending = false;
   1303 
   1304 	if (__predict_false(vmcb->ctrl.exitintinfo & VMCB_CTRL_EXITINTINFO_V)) {
   1305 		vmcb->ctrl.eventinj = vmcb->ctrl.exitintinfo;
   1306 		cpudata->evt_pending = true;
   1307 	}
   1308 }
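/*
 * EXITINTINFO holds an event whose delivery was interrupted by the #VMEXIT
 * itself (e.g. a nested page fault taken while injecting an interrupt).
 * Re-queueing it into EVENTINJ, as above, retries the injection on the next
 * VMRUN instead of silently dropping the event; evt_pending is reported to
 * userland in the exit state.
 */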
   1309 
   1310 static int
   1311 svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1312     struct nvmm_vcpu_exit *exit)
   1313 {
   1314 	struct nvmm_comm_page *comm = vcpu->comm;
   1315 	struct svm_machdata *machdata = mach->machdata;
   1316 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1317 	struct vmcb *vmcb = cpudata->vmcb;
   1318 	uint64_t machgen;
   1319 	int hcpu, s;
   1320 
   1321 	if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) {
   1322 		return EINVAL;
   1323 	}
   1324 	svm_vcpu_state_commit(vcpu);
   1325 	comm->state_cached = 0;
   1326 
   1327 	kpreempt_disable();
   1328 	hcpu = cpu_number();
   1329 
   1330 	svm_gtlb_catchup(vcpu, hcpu);
   1331 	svm_htlb_catchup(vcpu, hcpu);
   1332 
   1333 	if (vcpu->hcpu_last != hcpu) {
   1334 		svm_vmcb_cache_flush_all(vmcb);
   1335 		cpudata->gtsc_want_update = true;
   1336 	}
   1337 
   1338 	svm_vcpu_guest_dbregs_enter(vcpu);
   1339 	svm_vcpu_guest_misc_enter(vcpu);
   1340 	svm_vcpu_guest_fpu_enter(vcpu);
   1341 
   1342 	while (1) {
   1343 		if (cpudata->gtlb_want_flush) {
   1344 			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
   1345 		} else {
   1346 			vmcb->ctrl.tlb_ctrl = 0;
   1347 		}
   1348 
   1349 		if (__predict_false(cpudata->gtsc_want_update)) {
   1350 			vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
   1351 			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
   1352 		}
   1353 
   1354 		s = splhigh();
   1355 		machgen = svm_htlb_flush(machdata, cpudata);
   1356 		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
   1357 		svm_htlb_flush_ack(cpudata, machgen);
   1358 		splx(s);
   1359 
   1360 		svm_vmcb_cache_default(vmcb);
   1361 
   1362 		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
   1363 			cpudata->gtlb_want_flush = false;
   1364 			cpudata->gtsc_want_update = false;
   1365 			vcpu->hcpu_last = hcpu;
   1366 		}
   1367 		svm_exit_evt(cpudata, vmcb);
   1368 
   1369 		switch (vmcb->ctrl.exitcode) {
   1370 		case VMCB_EXITCODE_INTR:
   1371 		case VMCB_EXITCODE_NMI:
   1372 			exit->reason = NVMM_VCPU_EXIT_NONE;
   1373 			break;
   1374 		case VMCB_EXITCODE_VINTR:
   1375 			svm_event_waitexit_disable(vcpu, false);
   1376 			exit->reason = NVMM_VCPU_EXIT_INT_READY;
   1377 			break;
   1378 		case VMCB_EXITCODE_IRET:
   1379 			svm_event_waitexit_disable(vcpu, true);
   1380 			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
   1381 			break;
   1382 		case VMCB_EXITCODE_CPUID:
   1383 			svm_exit_cpuid(mach, vcpu, exit);
   1384 			break;
   1385 		case VMCB_EXITCODE_HLT:
   1386 			svm_exit_hlt(mach, vcpu, exit);
   1387 			break;
   1388 		case VMCB_EXITCODE_IOIO:
   1389 			svm_exit_io(mach, vcpu, exit);
   1390 			break;
   1391 		case VMCB_EXITCODE_MSR:
   1392 			svm_exit_msr(mach, vcpu, exit);
   1393 			break;
   1394 		case VMCB_EXITCODE_SHUTDOWN:
   1395 			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
   1396 			break;
   1397 		case VMCB_EXITCODE_RDPMC:
   1398 		case VMCB_EXITCODE_RSM:
   1399 		case VMCB_EXITCODE_INVLPGA:
   1400 		case VMCB_EXITCODE_VMRUN:
   1401 		case VMCB_EXITCODE_VMMCALL:
   1402 		case VMCB_EXITCODE_VMLOAD:
   1403 		case VMCB_EXITCODE_VMSAVE:
   1404 		case VMCB_EXITCODE_STGI:
   1405 		case VMCB_EXITCODE_CLGI:
   1406 		case VMCB_EXITCODE_SKINIT:
   1407 		case VMCB_EXITCODE_RDTSCP:
   1408 			svm_inject_ud(vcpu);
   1409 			exit->reason = NVMM_VCPU_EXIT_NONE;
   1410 			break;
   1411 		case VMCB_EXITCODE_MONITOR:
   1412 			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MONITOR);
   1413 			break;
   1414 		case VMCB_EXITCODE_MWAIT:
   1415 		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
   1416 			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MWAIT);
   1417 			break;
   1418 		case VMCB_EXITCODE_XSETBV:
   1419 			svm_exit_xsetbv(mach, vcpu, exit);
   1420 			break;
   1421 		case VMCB_EXITCODE_NPF:
   1422 			svm_exit_npf(mach, vcpu, exit);
   1423 			break;
   1424 		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
   1425 		default:
   1426 			svm_exit_invalid(exit, vmcb->ctrl.exitcode);
   1427 			break;
   1428 		}
   1429 
   1430 		/* If no reason to return to userland, keep rolling. */
   1431 		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
   1432 			break;
   1433 		}
   1434 		if (curcpu()->ci_data.cpu_softints != 0) {
   1435 			break;
   1436 		}
   1437 		if (curlwp->l_flag & LW_USERRET) {
   1438 			break;
   1439 		}
   1440 		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
   1441 			break;
   1442 		}
   1443 	}
   1444 
   1445 	cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;
   1446 
   1447 	svm_vcpu_guest_fpu_leave(vcpu);
   1448 	svm_vcpu_guest_misc_leave(vcpu);
   1449 	svm_vcpu_guest_dbregs_leave(vcpu);
   1450 
   1451 	kpreempt_enable();
   1452 
   1453 	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
   1454 	    VMCB_CTRL_V_TPR);
   1455 	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;
   1456 
   1457 	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
   1458 	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
   1459 	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
   1460 	    cpudata->int_window_exit;
   1461 	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
   1462 	    cpudata->nmi_window_exit;
   1463 	exit->exitstate[NVMM_X64_EXITSTATE_EVT_PENDING] =
   1464 	    cpudata->evt_pending;
   1465 
   1466 	return 0;
   1467 }
   1468 
   1469 /* -------------------------------------------------------------------------- */
   1470 
   1471 static int
   1472 svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
   1473 {
   1474 	struct pglist pglist;
   1475 	paddr_t _pa;
   1476 	vaddr_t _va;
   1477 	size_t i;
   1478 	int ret;
   1479 
   1480 	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
   1481 	    &pglist, 1, 0);
   1482 	if (ret != 0)
   1483 		return ENOMEM;
   1484 	_pa = TAILQ_FIRST(&pglist)->phys_addr;
   1485 	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
   1486 	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
   1487 	if (_va == 0)
   1488 		goto error;
   1489 
   1490 	for (i = 0; i < npages; i++) {
   1491 		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
   1492 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
   1493 	}
   1494 	pmap_update(pmap_kernel());
   1495 
   1496 	memset((void *)_va, 0, npages * PAGE_SIZE);
   1497 
   1498 	*pa = _pa;
   1499 	*va = _va;
   1500 	return 0;
   1501 
   1502 error:
   1503 	for (i = 0; i < npages; i++) {
   1504 		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
   1505 	}
   1506 	return ENOMEM;
   1507 }
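/*
 * Hypothetical caller, for illustration only (the real VCPU-creation path
 * is outside this excerpt) -- allocating the per-VCPU I/O bitmap:
 *
 *	vaddr_t va;
 *
 *	if (svm_memalloc(&cpudata->iobm_pa, &va, IOBM_NPAGES) != 0)
 *		return ENOMEM;
 *	cpudata->iobm = (uint8_t *)va;
 */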
   1508 
   1509 static void
   1510 svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
   1511 {
   1512 	size_t i;
   1513 
   1514 	pmap_kremove(va, npages * PAGE_SIZE);
   1515 	pmap_update(pmap_kernel());
   1516 	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
   1517 	for (i = 0; i < npages; i++) {
   1518 		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
   1519 	}
   1520 }
   1521 
   1522 /* -------------------------------------------------------------------------- */
   1523 
   1524 #define SVM_MSRBM_READ	__BIT(0)
   1525 #define SVM_MSRBM_WRITE	__BIT(1)
   1526 
   1527 static void
   1528 svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
   1529 {
   1530 	uint64_t byte;
   1531 	uint8_t bitoff;
   1532 
   1533 	if (msr < 0x00002000) {
   1534 		/* Range 1 */
   1535 		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
   1536 	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
   1537 		/* Range 2 */
   1538 		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
   1539 	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
   1540 		/* Range 3 */
   1541 		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
   1542 	} else {
   1543 		panic("%s: wrong range", __func__);
   1544 	}
   1545 
   1546 	bitoff = (msr & 0x3) << 1;
   1547 
   1548 	if (read) {
   1549 		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
   1550 	}
   1551 	if (write) {
   1552 		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
   1553 	}
   1554 }
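/*
 * Worked example (illustrative): allowing reads of MSR_STAR (0xC0000081),
 * which falls in range 2:
 *
 *	byte   = ((0xC0000081 - 0xC0000000) >> 2) + 0x0800 = 0x0820
 *	bitoff = (0x81 & 0x3) << 1                         = 2
 *
 * so the bit cleared is SVM_MSRBM_READ << 2 in bitmap[0x820].
 */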
   1555 
   1556 #define SVM_SEG_ATTRIB_TYPE		__BITS(3,0)
   1557 #define SVM_SEG_ATTRIB_S		__BIT(4)
   1558 #define SVM_SEG_ATTRIB_DPL		__BITS(6,5)
   1559 #define SVM_SEG_ATTRIB_P		__BIT(7)
   1560 #define SVM_SEG_ATTRIB_AVL		__BIT(8)
   1561 #define SVM_SEG_ATTRIB_L		__BIT(9)
   1562 #define SVM_SEG_ATTRIB_DEF		__BIT(10)
   1563 #define SVM_SEG_ATTRIB_G		__BIT(11)
   1564 
   1565 static void
   1566 svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
   1567     struct vmcb_segment *vseg)
   1568 {
   1569 	vseg->selector = seg->selector;
   1570 	vseg->attrib =
   1571 	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
   1572 	    __SHIFTIN(seg->attrib.s, SVM_SEG_ATTRIB_S) |
   1573 	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
   1574 	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
   1575 	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
   1576 	    __SHIFTIN(seg->attrib.l, SVM_SEG_ATTRIB_L) |
   1577 	    __SHIFTIN(seg->attrib.def, SVM_SEG_ATTRIB_DEF) |
   1578 	    __SHIFTIN(seg->attrib.g, SVM_SEG_ATTRIB_G);
   1579 	vseg->limit = seg->limit;
   1580 	vseg->base = seg->base;
   1581 }
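/*
 * Example (illustrative): a flat 64-bit code segment -- type=0xB, s=1,
 * dpl=0, p=1, l=1, g=1 -- packs to vseg->attrib = 0xA9B. This is the
 * compressed 12-bit SVM encoding: descriptor attribute bits 47:40 land in
 * attrib bits 7:0 and bits 55:52 in attrib bits 11:8, with the unused
 * limit-19:16 nibble squeezed out.
 */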
   1582 
   1583 static void
   1584 svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
   1585 {
   1586 	seg->selector = vseg->selector;
   1587 	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
   1588 	seg->attrib.s = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_S);
   1589 	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
   1590 	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
   1591 	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
   1592 	seg->attrib.l = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_L);
   1593 	seg->attrib.def = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF);
   1594 	seg->attrib.g = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_G);
   1595 	seg->limit = vseg->limit;
   1596 	seg->base = vseg->base;
   1597 }
   1598 
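         /*
          * Return true if the state about to be installed differs from the
          * current VMCB in a way that can affect cached translations: CR0 or
          * CR4 bits covered by the *_TLB_FLUSH masks, any CR3 change, or
          * EFER bits covered by EFER_TLB_FLUSH. The caller then schedules a
          * guest TLB flush.
          */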
   1599 static inline bool
   1600 svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
   1601     uint64_t flags)
   1602 {
   1603 	if (flags & NVMM_X64_STATE_CRS) {
   1604 		if ((vmcb->state.cr0 ^
   1605 		     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
   1606 			return true;
   1607 		}
   1608 		if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
   1609 			return true;
   1610 		}
   1611 		if ((vmcb->state.cr4 ^
   1612 		     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
   1613 			return true;
   1614 		}
   1615 	}
   1616 
   1617 	if (flags & NVMM_X64_STATE_MSRS) {
   1618 		if ((vmcb->state.efer ^
   1619 		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
   1620 			return true;
   1621 		}
   1622 	}
   1623 
   1624 	return false;
   1625 }
   1626 
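         /*
          * Install the fields selected in comm->state_wanted from the comm
          * page into the VMCB and cpudata, scheduling a guest TLB flush and
          * updating the VMCB state cache for the touched fields.
          */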
   1627 static void
   1628 svm_vcpu_setstate(struct nvmm_cpu *vcpu)
   1629 {
   1630 	struct nvmm_comm_page *comm = vcpu->comm;
   1631 	const struct nvmm_x64_state *state = &comm->state;
   1632 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1633 	struct vmcb *vmcb = cpudata->vmcb;
   1634 	struct fxsave *fpustate;
   1635 	uint64_t flags;
   1636 
   1637 	flags = comm->state_wanted;
   1638 
   1639 	if (svm_state_tlb_flush(vmcb, state, flags)) {
   1640 		cpudata->gtlb_want_flush = true;
   1641 	}
   1642 
   1643 	if (flags & NVMM_X64_STATE_SEGS) {
   1644 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
   1645 		    &vmcb->state.cs);
   1646 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
   1647 		    &vmcb->state.ds);
   1648 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
   1649 		    &vmcb->state.es);
   1650 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
   1651 		    &vmcb->state.fs);
   1652 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
   1653 		    &vmcb->state.gs);
   1654 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
   1655 		    &vmcb->state.ss);
   1656 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
   1657 		    &vmcb->state.gdt);
   1658 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
   1659 		    &vmcb->state.idt);
   1660 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
   1661 		    &vmcb->state.ldt);
   1662 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
   1663 		    &vmcb->state.tr);
   1664 
   1665 		vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
   1666 	}
   1667 
   1668 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   1669 	if (flags & NVMM_X64_STATE_GPRS) {
   1670 		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
   1671 
   1672 		vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
   1673 		vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
   1674 		vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
   1675 		vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
   1676 	}
   1677 
   1678 	if (flags & NVMM_X64_STATE_CRS) {
   1679 		vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
   1680 		vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
   1681 		vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
   1682 		vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
   1683 
   1684 		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
   1685 		vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
   1686 		    VMCB_CTRL_V_TPR);
   1687 
   1688 		if (svm_xcr0_mask != 0) {
   1689 			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
   1690 			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
   1691 			cpudata->gxcr0 &= svm_xcr0_mask;
   1692 			cpudata->gxcr0 |= XCR0_X87;
   1693 		}
   1694 	}
   1695 
   1696 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   1697 	if (flags & NVMM_X64_STATE_DRS) {
   1698 		memcpy(cpudata->drs, state->drs, sizeof(state->drs));
   1699 
   1700 		vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
   1701 		vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
   1702 	}
   1703 
   1704 	if (flags & NVMM_X64_STATE_MSRS) {
    1705 		/*
    1706 		 * EFER_SVME is mandatory: VMRUN fails if it is clear.
    1707 		 */
   1708 		vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
   1709 		vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
   1710 		vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
   1711 		vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
   1712 		vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
   1713 		vmcb->state.kernelgsbase =
   1714 		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
   1715 		vmcb->state.sysenter_cs =
   1716 		    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
   1717 		vmcb->state.sysenter_esp =
   1718 		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
   1719 		vmcb->state.sysenter_eip =
   1720 		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
   1721 		vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];
   1722 
   1723 		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
   1724 		cpudata->gtsc_want_update = true;
   1725 	}
   1726 
   1727 	if (flags & NVMM_X64_STATE_INTR) {
   1728 		if (state->intr.int_shadow) {
   1729 			vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
   1730 		} else {
   1731 			vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
   1732 		}
   1733 
   1734 		if (state->intr.int_window_exiting) {
   1735 			svm_event_waitexit_enable(vcpu, false);
   1736 		} else {
   1737 			svm_event_waitexit_disable(vcpu, false);
   1738 		}
   1739 
   1740 		if (state->intr.nmi_window_exiting) {
   1741 			svm_event_waitexit_enable(vcpu, true);
   1742 		} else {
   1743 			svm_event_waitexit_disable(vcpu, true);
   1744 		}
   1745 	}
   1746 
   1747 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   1748 	if (flags & NVMM_X64_STATE_FPU) {
   1749 		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
   1750 		    sizeof(state->fpu));
   1751 
   1752 		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
   1753 		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
   1754 		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
   1755 
   1756 		if (svm_xcr0_mask != 0) {
    1757 			/* Reset XSTATE_BV to force a reload. */
   1758 			cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
   1759 		}
   1760 	}
   1761 
   1762 	svm_vmcb_cache_update(vmcb, flags);
   1763 
   1764 	comm->state_wanted = 0;
   1765 	comm->state_cached |= flags;
   1766 }
   1767 
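         /*
          * The mirror of svm_vcpu_setstate(): copy the fields selected in
          * comm->state_wanted from the VMCB and cpudata back into the comm
          * page, with EFER_SVME filtered out of the guest-visible EFER.
          */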
   1768 static void
   1769 svm_vcpu_getstate(struct nvmm_cpu *vcpu)
   1770 {
   1771 	struct nvmm_comm_page *comm = vcpu->comm;
   1772 	struct nvmm_x64_state *state = &comm->state;
   1773 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1774 	struct vmcb *vmcb = cpudata->vmcb;
   1775 	uint64_t flags;
   1776 
   1777 	flags = comm->state_wanted;
   1778 
   1779 	if (flags & NVMM_X64_STATE_SEGS) {
   1780 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
   1781 		    &vmcb->state.cs);
   1782 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
   1783 		    &vmcb->state.ds);
   1784 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
   1785 		    &vmcb->state.es);
   1786 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
   1787 		    &vmcb->state.fs);
   1788 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
   1789 		    &vmcb->state.gs);
   1790 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
   1791 		    &vmcb->state.ss);
   1792 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
   1793 		    &vmcb->state.gdt);
   1794 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
   1795 		    &vmcb->state.idt);
   1796 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
   1797 		    &vmcb->state.ldt);
   1798 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
   1799 		    &vmcb->state.tr);
   1800 
   1801 		state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
   1802 	}
   1803 
   1804 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   1805 	if (flags & NVMM_X64_STATE_GPRS) {
   1806 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   1807 
   1808 		state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
   1809 		state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
   1810 		state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
   1811 		state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
   1812 	}
   1813 
   1814 	if (flags & NVMM_X64_STATE_CRS) {
   1815 		state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
   1816 		state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
   1817 		state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
   1818 		state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
   1819 		state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
   1820 		    VMCB_CTRL_V_TPR);
   1821 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   1822 	}
   1823 
   1824 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   1825 	if (flags & NVMM_X64_STATE_DRS) {
   1826 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   1827 
   1828 		state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
   1829 		state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
   1830 	}
   1831 
   1832 	if (flags & NVMM_X64_STATE_MSRS) {
   1833 		state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
   1834 		state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
   1835 		state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
   1836 		state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
   1837 		state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
   1838 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   1839 		    vmcb->state.kernelgsbase;
   1840 		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
   1841 		    vmcb->state.sysenter_cs;
   1842 		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
   1843 		    vmcb->state.sysenter_esp;
   1844 		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
   1845 		    vmcb->state.sysenter_eip;
   1846 		state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
   1847 		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
   1848 
    1849 		/* Hide EFER_SVME from the guest-visible EFER. */
   1850 		state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
   1851 	}
   1852 
   1853 	if (flags & NVMM_X64_STATE_INTR) {
   1854 		state->intr.int_shadow =
   1855 		    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
   1856 		state->intr.int_window_exiting = cpudata->int_window_exit;
   1857 		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
   1858 		state->intr.evt_pending = cpudata->evt_pending;
   1859 	}
   1860 
   1861 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   1862 	if (flags & NVMM_X64_STATE_FPU) {
   1863 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   1864 		    sizeof(state->fpu));
   1865 	}
   1866 
   1867 	comm->state_wanted = 0;
   1868 	comm->state_cached |= flags;
   1869 }
   1870 
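         /*
          * Provide: fetch the fields the virtualizer asked for. Commit:
          * install the fields the virtualizer marked as modified in
          * comm->state_commit before re-entering the guest.
          */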
   1871 static void
   1872 svm_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
   1873 {
   1874 	vcpu->comm->state_wanted = flags;
   1875 	svm_vcpu_getstate(vcpu);
   1876 }
   1877 
   1878 static void
   1879 svm_vcpu_state_commit(struct nvmm_cpu *vcpu)
   1880 {
   1881 	vcpu->comm->state_wanted = vcpu->comm->state_commit;
   1882 	vcpu->comm->state_commit = 0;
   1883 	svm_vcpu_setstate(vcpu);
   1884 }
   1885 
   1886 /* -------------------------------------------------------------------------- */
   1887 
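         /*
          * Pick a free ASID from the global bitmap. If none is left, fall
          * back to the shared last ASID, which requires special TLB handling
          * at run time.
          */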
   1888 static void
   1889 svm_asid_alloc(struct nvmm_cpu *vcpu)
   1890 {
   1891 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1892 	struct vmcb *vmcb = cpudata->vmcb;
   1893 	size_t i, oct, bit;
   1894 
   1895 	mutex_enter(&svm_asidlock);
   1896 
   1897 	for (i = 0; i < svm_maxasid; i++) {
   1898 		oct = i / 8;
   1899 		bit = i % 8;
   1900 
   1901 		if (svm_asidmap[oct] & __BIT(bit)) {
   1902 			continue;
   1903 		}
   1904 
   1905 		svm_asidmap[oct] |= __BIT(bit);
   1906 		vmcb->ctrl.guest_asid = i;
   1907 		mutex_exit(&svm_asidlock);
   1908 		return;
   1909 	}
   1910 
   1911 	/*
   1912 	 * No free ASID. Use the last one, which is shared and requires
   1913 	 * special TLB handling.
   1914 	 */
   1915 	cpudata->shared_asid = true;
   1916 	vmcb->ctrl.guest_asid = svm_maxasid - 1;
   1917 	mutex_exit(&svm_asidlock);
   1918 }
   1919 
   1920 static void
   1921 svm_asid_free(struct nvmm_cpu *vcpu)
   1922 {
   1923 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1924 	struct vmcb *vmcb = cpudata->vmcb;
   1925 	size_t oct, bit;
   1926 
   1927 	if (cpudata->shared_asid) {
   1928 		return;
   1929 	}
   1930 
   1931 	oct = vmcb->ctrl.guest_asid / 8;
   1932 	bit = vmcb->ctrl.guest_asid % 8;
   1933 
   1934 	mutex_enter(&svm_asidlock);
   1935 	svm_asidmap[oct] &= ~__BIT(bit);
   1936 	mutex_exit(&svm_asidlock);
   1937 }
   1938 
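         /*
          * Program the initial VMCB of a VCPU: intercepts, the I/O and MSR
          * bitmaps, an ASID, Nested Paging, the XSAVE header, and finally
          * the architectural RESET state.
          */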
   1939 static void
   1940 svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   1941 {
   1942 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1943 	struct vmcb *vmcb = cpudata->vmcb;
   1944 
   1945 	/* Allow reads/writes of Control Registers. */
   1946 	vmcb->ctrl.intercept_cr = 0;
   1947 
   1948 	/* Allow reads/writes of Debug Registers. */
   1949 	vmcb->ctrl.intercept_dr = 0;
   1950 
   1951 	/* Allow exceptions 0 to 31. */
   1952 	vmcb->ctrl.intercept_vec = 0;
   1953 
   1954 	/*
   1955 	 * Allow:
    1956 	 *  - SMI [SMM interrupts]
   1957 	 *  - VINTR [virtual interrupts]
    1958 	 *  - CR0_SPEC [CR0 writes that change fields other than CR0.TS or CR0.MP]
   1959 	 *  - RIDTR [reads of IDTR]
   1960 	 *  - RGDTR [reads of GDTR]
   1961 	 *  - RLDTR [reads of LDTR]
   1962 	 *  - RTR [reads of TR]
   1963 	 *  - WIDTR [writes of IDTR]
   1964 	 *  - WGDTR [writes of GDTR]
   1965 	 *  - WLDTR [writes of LDTR]
   1966 	 *  - WTR [writes of TR]
   1967 	 *  - RDTSC [rdtsc instruction]
   1968 	 *  - PUSHF [pushf instruction]
   1969 	 *  - POPF [popf instruction]
   1970 	 *  - IRET [iret instruction]
   1971 	 *  - INTN [int $n instructions]
   1972 	 *  - INVD [invd instruction]
   1973 	 *  - PAUSE [pause instruction]
    1974 	 *  - INVLPG [invlpg instruction]
   1975 	 *  - TASKSW [task switches]
   1976 	 *
   1977 	 * Intercept the rest below.
   1978 	 */
   1979 	vmcb->ctrl.intercept_misc1 =
   1980 	    VMCB_CTRL_INTERCEPT_INTR |
   1981 	    VMCB_CTRL_INTERCEPT_NMI |
   1982 	    VMCB_CTRL_INTERCEPT_INIT |
   1983 	    VMCB_CTRL_INTERCEPT_RDPMC |
   1984 	    VMCB_CTRL_INTERCEPT_CPUID |
   1985 	    VMCB_CTRL_INTERCEPT_RSM |
   1986 	    VMCB_CTRL_INTERCEPT_HLT |
   1987 	    VMCB_CTRL_INTERCEPT_INVLPGA |
   1988 	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
   1989 	    VMCB_CTRL_INTERCEPT_MSR_PROT |
   1990 	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
   1991 	    VMCB_CTRL_INTERCEPT_SHUTDOWN;
   1992 
   1993 	/*
   1994 	 * Allow:
   1995 	 *  - ICEBP [icebp instruction]
   1996 	 *  - WBINVD [wbinvd instruction]
   1997 	 *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
   1998 	 *
   1999 	 * Intercept the rest below.
   2000 	 */
   2001 	vmcb->ctrl.intercept_misc2 =
   2002 	    VMCB_CTRL_INTERCEPT_VMRUN |
   2003 	    VMCB_CTRL_INTERCEPT_VMMCALL |
   2004 	    VMCB_CTRL_INTERCEPT_VMLOAD |
   2005 	    VMCB_CTRL_INTERCEPT_VMSAVE |
   2006 	    VMCB_CTRL_INTERCEPT_STGI |
   2007 	    VMCB_CTRL_INTERCEPT_CLGI |
   2008 	    VMCB_CTRL_INTERCEPT_SKINIT |
   2009 	    VMCB_CTRL_INTERCEPT_RDTSCP |
   2010 	    VMCB_CTRL_INTERCEPT_MONITOR |
   2011 	    VMCB_CTRL_INTERCEPT_MWAIT |
   2012 	    VMCB_CTRL_INTERCEPT_XSETBV;
   2013 
   2014 	/* Intercept all I/O accesses. */
   2015 	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
   2016 	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;
   2017 
   2018 	/* Allow direct access to certain MSRs. */
   2019 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   2020 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
   2021 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   2022 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   2023 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   2024 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   2025 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   2026 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   2027 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   2028 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   2029 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   2030 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   2031 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
   2032 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   2033 	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;
   2034 
    2035 	/* Allocate an ASID. */
   2036 	svm_asid_alloc(vcpu);
   2037 
    2038 	/* Virtual interrupt masking; the V field also holds the virtual TPR. */
   2039 	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;
   2040 
   2041 	/* Enable Nested Paging. */
   2042 	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
   2043 	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];
   2044 
   2045 	/* Init XSAVE header. */
   2046 	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
   2047 	cpudata->gfpu.xsh_xcomp_bv = 0;
   2048 
    2049 	/* These host MSRs are static; cache their values once. */
   2050 	cpudata->star = rdmsr(MSR_STAR);
   2051 	cpudata->lstar = rdmsr(MSR_LSTAR);
   2052 	cpudata->cstar = rdmsr(MSR_CSTAR);
   2053 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   2054 
   2055 	/* Install the RESET state. */
   2056 	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
   2057 	    sizeof(nvmm_x86_reset_state));
   2058 	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
   2059 	vcpu->comm->state_cached = 0;
   2060 	svm_vcpu_setstate(vcpu);
   2061 }
   2062 
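         /*
          * Allocate the per-VCPU resources: the cpudata block, the VMCB, and
          * the I/O and MSR bitmaps, all in wired kernel memory.
          */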
   2063 static int
   2064 svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2065 {
   2066 	struct svm_cpudata *cpudata;
   2067 	int error;
   2068 
   2069 	/* Allocate the SVM cpudata. */
   2070 	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
   2071 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   2072 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   2073 	vcpu->cpudata = cpudata;
   2074 
   2075 	/* VMCB */
   2076 	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
   2077 	    VMCB_NPAGES);
   2078 	if (error)
   2079 		goto error;
   2080 
   2081 	/* I/O Bitmap */
   2082 	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
   2083 	    IOBM_NPAGES);
   2084 	if (error)
   2085 		goto error;
   2086 
   2087 	/* MSR Bitmap */
   2088 	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   2089 	    MSRBM_NPAGES);
   2090 	if (error)
   2091 		goto error;
   2092 
   2093 	/* Init the VCPU info. */
   2094 	svm_vcpu_init(mach, vcpu);
   2095 
   2096 	return 0;
   2097 
   2098 error:
   2099 	if (cpudata->vmcb_pa) {
   2100 		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
   2101 		    VMCB_NPAGES);
   2102 	}
   2103 	if (cpudata->iobm_pa) {
   2104 		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
   2105 		    IOBM_NPAGES);
   2106 	}
   2107 	if (cpudata->msrbm_pa) {
   2108 		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   2109 		    MSRBM_NPAGES);
   2110 	}
   2111 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2112 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2113 	return error;
   2114 }
   2115 
   2116 static void
   2117 svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2118 {
   2119 	struct svm_cpudata *cpudata = vcpu->cpudata;
   2120 
   2121 	svm_asid_free(vcpu);
   2122 
   2123 	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
   2124 	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
   2125 	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2126 
   2127 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2128 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2129 }
   2130 
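         /*
          * Configure CPUID emulation for one leaf. A conf entry either masks
          * the host-provided values (the set and del bits must not overlap)
          * or forces an exit to the virtualizer; with both 'mask' and 'exit'
          * clear, the entry for that leaf is removed. Entries live in a
          * fixed table of SVM_NCPUIDS slots, hence ENOBUFS when it is full.
          */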
   2131 static int
   2132 svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
   2133 {
   2134 	struct svm_cpudata *cpudata = vcpu->cpudata;
   2135 	struct nvmm_vcpu_conf_cpuid *cpuid;
   2136 	size_t i;
   2137 
   2138 	if (__predict_false(op != NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID))) {
   2139 		return EINVAL;
   2140 	}
   2141 	cpuid = data;
   2142 
   2143 	if (__predict_false(cpuid->mask && cpuid->exit)) {
   2144 		return EINVAL;
   2145 	}
   2146 	if (__predict_false(cpuid->mask &&
   2147 	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
   2148 	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
   2149 	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
   2150 	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
   2151 		return EINVAL;
   2152 	}
   2153 
    2154 	/* If both are unset, delete the leaf to restore the default behavior. */
   2155 	if (!cpuid->mask && !cpuid->exit) {
   2156 		for (i = 0; i < SVM_NCPUIDS; i++) {
   2157 			if (!cpudata->cpuidpresent[i]) {
   2158 				continue;
   2159 			}
   2160 			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2161 				cpudata->cpuidpresent[i] = false;
   2162 			}
   2163 		}
   2164 		return 0;
   2165 	}
   2166 
    2167 	/* If the leaf is already present, replace it. */
   2168 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2169 		if (!cpudata->cpuidpresent[i]) {
   2170 			continue;
   2171 		}
   2172 		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2173 			memcpy(&cpudata->cpuid[i], cpuid,
   2174 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2175 			return 0;
   2176 		}
   2177 	}
   2178 
    2179 	/* Not present yet: insert it in a free slot. */
   2180 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2181 		if (!cpudata->cpuidpresent[i]) {
   2182 			cpudata->cpuidpresent[i] = true;
   2183 			memcpy(&cpudata->cpuid[i], cpuid,
   2184 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2185 			return 0;
   2186 		}
   2187 	}
   2188 
   2189 	return ENOBUFS;
   2190 }
   2191 
   2192 /* -------------------------------------------------------------------------- */
   2193 
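         /*
          * Called by the pmap when the guest physical mappings change: bump
          * the machine-wide hTLB generation so that each VCPU flushes on its
          * next entry, and shoot down the host TLBs to force the VCPUs out
          * of guest mode.
          */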
   2194 static void
   2195 svm_tlb_flush(struct pmap *pm)
   2196 {
   2197 	struct nvmm_machine *mach = pm->pm_data;
   2198 	struct svm_machdata *machdata = mach->machdata;
   2199 
   2200 	atomic_inc_64(&machdata->mach_htlb_gen);
   2201 
   2202 	/* Generates IPIs, which cause #VMEXITs. */
   2203 	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_UPDATE);
   2204 }
   2205 
   2206 static void
   2207 svm_machine_create(struct nvmm_machine *mach)
   2208 {
   2209 	struct svm_machdata *machdata;
   2210 
   2211 	/* Fill in pmap info. */
   2212 	mach->vm->vm_map.pmap->pm_data = (void *)mach;
   2213 	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;
   2214 
   2215 	machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
   2216 	mach->machdata = machdata;
   2217 
   2218 	/* Start with an hTLB flush everywhere. */
   2219 	machdata->mach_htlb_gen = 1;
   2220 }
   2221 
   2222 static void
   2223 svm_machine_destroy(struct nvmm_machine *mach)
   2224 {
   2225 	kmem_free(mach->machdata, sizeof(struct svm_machdata));
   2226 }
   2227 
   2228 static int
   2229 svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2230 {
   2231 	panic("%s: impossible", __func__);
   2232 }
   2233 
   2234 /* -------------------------------------------------------------------------- */
   2235 
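         /*
          * Detect usable SVM: an AMD CPU with the SVM feature, extended leaf
          * 0x8000000A available and advertising Nested Paging and nRIP save,
          * and SVM not disabled-and-locked by the firmware in MSR_VMCR.
          */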
   2236 static bool
   2237 svm_ident(void)
   2238 {
   2239 	u_int descs[4];
   2240 	uint64_t msr;
   2241 
   2242 	if (cpu_vendor != CPUVENDOR_AMD) {
   2243 		return false;
   2244 	}
   2245 	if (!(cpu_feature[3] & CPUID_SVM)) {
   2246 		return false;
   2247 	}
   2248 
   2249 	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
   2250 		return false;
   2251 	}
   2252 	x86_cpuid(0x8000000a, descs);
   2253 
   2254 	/* Want Nested Paging. */
   2255 	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
   2256 		return false;
   2257 	}
   2258 
   2259 	/* Want nRIP. */
   2260 	if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
   2261 		return false;
   2262 	}
   2263 
   2264 	svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;
   2265 
   2266 	msr = rdmsr(MSR_VMCR);
   2267 	if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
   2268 		return false;
   2269 	}
   2270 
   2271 	return true;
   2272 }
   2273 
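         /*
          * Build the global ASID bitmap. ASID 0 belongs to the host, and
          * ASID maxasid-1 is kept as the shared fallback, so both are marked
          * used up front.
          */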
   2274 static void
   2275 svm_init_asid(uint32_t maxasid)
   2276 {
   2277 	size_t i, j, allocsz;
   2278 
   2279 	mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);
   2280 
    2281 	/* Arbitrarily cap the number of ASIDs. */
   2282 	maxasid = uimin(maxasid, 8192);
   2283 
   2284 	svm_maxasid = maxasid;
   2285 	allocsz = roundup(maxasid, 8) / 8;
   2286 	svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   2287 
   2288 	/* ASID 0 is reserved for the host. */
   2289 	svm_asidmap[0] |= __BIT(0);
   2290 
    2291 	/* ASID n-1 is special: it is shared when we run out of ASIDs. */
   2292 	i = (maxasid - 1) / 8;
   2293 	j = (maxasid - 1) % 8;
   2294 	svm_asidmap[i] |= __BIT(j);
   2295 }
   2296 
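         /*
          * Cross-call body, run on each CPU: clear VMCR_SVMED if the
          * firmware left it set but unlocked, toggle EFER_SVME, and point
          * MSR_VM_HSAVE_PA at this CPU's host save area when enabling.
          */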
   2297 static void
   2298 svm_change_cpu(void *arg1, void *arg2)
   2299 {
   2300 	bool enable = (bool)arg1;
   2301 	uint64_t msr;
   2302 
   2303 	msr = rdmsr(MSR_VMCR);
   2304 	if (msr & VMCR_SVMED) {
   2305 		wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
   2306 	}
   2307 
   2308 	if (!enable) {
   2309 		wrmsr(MSR_VM_HSAVE_PA, 0);
   2310 	}
   2311 
   2312 	msr = rdmsr(MSR_EFER);
   2313 	if (enable) {
   2314 		msr |= EFER_SVME;
   2315 	} else {
   2316 		msr &= ~EFER_SVME;
   2317 	}
   2318 	wrmsr(MSR_EFER, msr);
   2319 
   2320 	if (enable) {
   2321 		wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
   2322 	}
   2323 }
   2324 
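         /*
          * Global initialization: select the guest TLB flush command
          * (by-ASID if available, otherwise flush-all), size the ASID map
          * from CPUID leaf 0x8000000A EBX, compute the XCR0 mask, allocate
          * one host save page per CPU, and broadcast svm_change_cpu() to
          * enable SVM everywhere.
          */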
   2325 static void
   2326 svm_init(void)
   2327 {
   2328 	CPU_INFO_ITERATOR cii;
   2329 	struct cpu_info *ci;
   2330 	struct vm_page *pg;
   2331 	u_int descs[4];
   2332 	uint64_t xc;
   2333 
   2334 	x86_cpuid(0x8000000a, descs);
   2335 
    2336 	/* Choose the guest TLB flush command. */
   2337 	if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
   2338 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
   2339 	} else {
   2340 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
   2341 	}
   2342 
   2343 	/* Init the ASID. */
   2344 	svm_init_asid(descs[1]);
   2345 
   2346 	/* Init the XCR0 mask. */
   2347 	svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
   2348 
   2349 	memset(hsave, 0, sizeof(hsave));
   2350 	for (CPU_INFO_FOREACH(cii, ci)) {
   2351 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   2352 		hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
   2353 	}
   2354 
   2355 	xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
   2356 	xc_wait(xc);
   2357 }
   2358 
   2359 static void
   2360 svm_fini_asid(void)
   2361 {
   2362 	size_t allocsz;
   2363 
   2364 	allocsz = roundup(svm_maxasid, 8) / 8;
   2365 	kmem_free(svm_asidmap, allocsz);
   2366 
   2367 	mutex_destroy(&svm_asidlock);
   2368 }
   2369 
   2370 static void
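         /*
          * Global teardown: disable SVM on every CPU, then free the host
          * save pages and the ASID map.
          */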
   2371 svm_fini(void)
   2372 {
   2373 	uint64_t xc;
   2374 	size_t i;
   2375 
   2376 	xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
   2377 	xc_wait(xc);
   2378 
   2379 	for (i = 0; i < MAXCPUS; i++) {
   2380 		if (hsave[i].pa != 0)
   2381 			uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
   2382 	}
   2383 
   2384 	svm_fini_asid();
   2385 }
   2386 
   2387 static void
   2388 svm_capability(struct nvmm_capability *cap)
   2389 {
   2390 	cap->arch.xcr0_mask = svm_xcr0_mask;
   2391 	cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
   2392 	cap->arch.conf_cpuid_maxops = SVM_NCPUIDS;
   2393 }
   2394 
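         /* The SVM backend entry points, exported to the MI NVMM code. */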
   2395 const struct nvmm_impl nvmm_x86_svm = {
   2396 	.ident = svm_ident,
   2397 	.init = svm_init,
   2398 	.fini = svm_fini,
   2399 	.capability = svm_capability,
   2400 	.mach_conf_max = NVMM_X86_MACH_NCONF,
   2401 	.mach_conf_sizes = NULL,
   2402 	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
   2403 	.vcpu_conf_sizes = svm_vcpu_conf_sizes,
   2404 	.state_size = sizeof(struct nvmm_x64_state),
   2405 	.machine_create = svm_machine_create,
   2406 	.machine_destroy = svm_machine_destroy,
   2407 	.machine_configure = svm_machine_configure,
   2408 	.vcpu_create = svm_vcpu_create,
   2409 	.vcpu_destroy = svm_vcpu_destroy,
   2410 	.vcpu_configure = svm_vcpu_configure,
   2411 	.vcpu_setstate = svm_vcpu_setstate,
   2412 	.vcpu_getstate = svm_vcpu_getstate,
   2413 	.vcpu_inject = svm_vcpu_inject,
   2414 	.vcpu_run = svm_vcpu_run
   2415 };
   2416