      1 /*	$NetBSD: nvmm_x86_svm.c,v 1.66 2020/08/05 10:31:37 maxv Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Maxime Villard.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.66 2020/08/05 10:31:37 maxv Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/kernel.h>
     38 #include <sys/kmem.h>
     39 #include <sys/cpu.h>
     40 #include <sys/xcall.h>
     41 #include <sys/mman.h>
     42 
     43 #include <uvm/uvm.h>
     44 #include <uvm/uvm_page.h>
     45 
     46 #include <x86/cputypes.h>
     47 #include <x86/specialreg.h>
     48 #include <x86/pmap.h>
     49 #include <x86/dbregs.h>
     50 #include <x86/cpu_counter.h>
     51 #include <machine/cpuvar.h>
     52 
     53 #include <dev/nvmm/nvmm.h>
     54 #include <dev/nvmm/nvmm_internal.h>
     55 #include <dev/nvmm/x86/nvmm_x86.h>
     56 
     57 int svm_vmrun(paddr_t, uint64_t *);
     58 
     59 static inline void
     60 svm_clgi(void)
     61 {
     62 	asm volatile ("clgi" ::: "memory");
     63 }
     64 
     65 static inline void
     66 svm_stgi(void)
     67 {
     68 	asm volatile ("stgi" ::: "memory");
     69 }
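/*
 * CLGI clears and STGI sets the Global Interrupt Flag (GIF). While GIF is
 * clear, physical interrupts, NMIs and SMIs are held pending on the host
 * CPU; the run loop below brackets VMRUN with CLGI/STGI so that nothing
 * can interrupt the world switch.
 */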
     70 
     71 #define	MSR_VM_HSAVE_PA	0xC0010117
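/*
 * MSR holding the physical address of the host save area, one per CPU.
 * VMRUN stores the host state there and #VMEXIT reloads it, so the MSR
 * must be programmed before the first VMRUN on each CPU.
 */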
     72 
     73 /* -------------------------------------------------------------------------- */
     74 
     75 #define VMCB_EXITCODE_CR0_READ		0x0000
     76 #define VMCB_EXITCODE_CR1_READ		0x0001
     77 #define VMCB_EXITCODE_CR2_READ		0x0002
     78 #define VMCB_EXITCODE_CR3_READ		0x0003
     79 #define VMCB_EXITCODE_CR4_READ		0x0004
     80 #define VMCB_EXITCODE_CR5_READ		0x0005
     81 #define VMCB_EXITCODE_CR6_READ		0x0006
     82 #define VMCB_EXITCODE_CR7_READ		0x0007
     83 #define VMCB_EXITCODE_CR8_READ		0x0008
     84 #define VMCB_EXITCODE_CR9_READ		0x0009
     85 #define VMCB_EXITCODE_CR10_READ		0x000A
     86 #define VMCB_EXITCODE_CR11_READ		0x000B
     87 #define VMCB_EXITCODE_CR12_READ		0x000C
     88 #define VMCB_EXITCODE_CR13_READ		0x000D
     89 #define VMCB_EXITCODE_CR14_READ		0x000E
     90 #define VMCB_EXITCODE_CR15_READ		0x000F
     91 #define VMCB_EXITCODE_CR0_WRITE		0x0010
     92 #define VMCB_EXITCODE_CR1_WRITE		0x0011
     93 #define VMCB_EXITCODE_CR2_WRITE		0x0012
     94 #define VMCB_EXITCODE_CR3_WRITE		0x0013
     95 #define VMCB_EXITCODE_CR4_WRITE		0x0014
     96 #define VMCB_EXITCODE_CR5_WRITE		0x0015
     97 #define VMCB_EXITCODE_CR6_WRITE		0x0016
     98 #define VMCB_EXITCODE_CR7_WRITE		0x0017
     99 #define VMCB_EXITCODE_CR8_WRITE		0x0018
    100 #define VMCB_EXITCODE_CR9_WRITE		0x0019
    101 #define VMCB_EXITCODE_CR10_WRITE	0x001A
    102 #define VMCB_EXITCODE_CR11_WRITE	0x001B
    103 #define VMCB_EXITCODE_CR12_WRITE	0x001C
    104 #define VMCB_EXITCODE_CR13_WRITE	0x001D
    105 #define VMCB_EXITCODE_CR14_WRITE	0x001E
    106 #define VMCB_EXITCODE_CR15_WRITE	0x001F
    107 #define VMCB_EXITCODE_DR0_READ		0x0020
    108 #define VMCB_EXITCODE_DR1_READ		0x0021
    109 #define VMCB_EXITCODE_DR2_READ		0x0022
    110 #define VMCB_EXITCODE_DR3_READ		0x0023
    111 #define VMCB_EXITCODE_DR4_READ		0x0024
    112 #define VMCB_EXITCODE_DR5_READ		0x0025
    113 #define VMCB_EXITCODE_DR6_READ		0x0026
    114 #define VMCB_EXITCODE_DR7_READ		0x0027
    115 #define VMCB_EXITCODE_DR8_READ		0x0028
    116 #define VMCB_EXITCODE_DR9_READ		0x0029
    117 #define VMCB_EXITCODE_DR10_READ		0x002A
    118 #define VMCB_EXITCODE_DR11_READ		0x002B
    119 #define VMCB_EXITCODE_DR12_READ		0x002C
    120 #define VMCB_EXITCODE_DR13_READ		0x002D
    121 #define VMCB_EXITCODE_DR14_READ		0x002E
    122 #define VMCB_EXITCODE_DR15_READ		0x002F
    123 #define VMCB_EXITCODE_DR0_WRITE		0x0030
    124 #define VMCB_EXITCODE_DR1_WRITE		0x0031
    125 #define VMCB_EXITCODE_DR2_WRITE		0x0032
    126 #define VMCB_EXITCODE_DR3_WRITE		0x0033
    127 #define VMCB_EXITCODE_DR4_WRITE		0x0034
    128 #define VMCB_EXITCODE_DR5_WRITE		0x0035
    129 #define VMCB_EXITCODE_DR6_WRITE		0x0036
    130 #define VMCB_EXITCODE_DR7_WRITE		0x0037
    131 #define VMCB_EXITCODE_DR8_WRITE		0x0038
    132 #define VMCB_EXITCODE_DR9_WRITE		0x0039
    133 #define VMCB_EXITCODE_DR10_WRITE	0x003A
    134 #define VMCB_EXITCODE_DR11_WRITE	0x003B
    135 #define VMCB_EXITCODE_DR12_WRITE	0x003C
    136 #define VMCB_EXITCODE_DR13_WRITE	0x003D
    137 #define VMCB_EXITCODE_DR14_WRITE	0x003E
    138 #define VMCB_EXITCODE_DR15_WRITE	0x003F
    139 #define VMCB_EXITCODE_EXCP0		0x0040
    140 #define VMCB_EXITCODE_EXCP1		0x0041
    141 #define VMCB_EXITCODE_EXCP2		0x0042
    142 #define VMCB_EXITCODE_EXCP3		0x0043
    143 #define VMCB_EXITCODE_EXCP4		0x0044
    144 #define VMCB_EXITCODE_EXCP5		0x0045
    145 #define VMCB_EXITCODE_EXCP6		0x0046
    146 #define VMCB_EXITCODE_EXCP7		0x0047
    147 #define VMCB_EXITCODE_EXCP8		0x0048
    148 #define VMCB_EXITCODE_EXCP9		0x0049
    149 #define VMCB_EXITCODE_EXCP10		0x004A
    150 #define VMCB_EXITCODE_EXCP11		0x004B
    151 #define VMCB_EXITCODE_EXCP12		0x004C
    152 #define VMCB_EXITCODE_EXCP13		0x004D
    153 #define VMCB_EXITCODE_EXCP14		0x004E
    154 #define VMCB_EXITCODE_EXCP15		0x004F
    155 #define VMCB_EXITCODE_EXCP16		0x0050
    156 #define VMCB_EXITCODE_EXCP17		0x0051
    157 #define VMCB_EXITCODE_EXCP18		0x0052
    158 #define VMCB_EXITCODE_EXCP19		0x0053
    159 #define VMCB_EXITCODE_EXCP20		0x0054
    160 #define VMCB_EXITCODE_EXCP21		0x0055
    161 #define VMCB_EXITCODE_EXCP22		0x0056
    162 #define VMCB_EXITCODE_EXCP23		0x0057
    163 #define VMCB_EXITCODE_EXCP24		0x0058
    164 #define VMCB_EXITCODE_EXCP25		0x0059
    165 #define VMCB_EXITCODE_EXCP26		0x005A
    166 #define VMCB_EXITCODE_EXCP27		0x005B
    167 #define VMCB_EXITCODE_EXCP28		0x005C
    168 #define VMCB_EXITCODE_EXCP29		0x005D
    169 #define VMCB_EXITCODE_EXCP30		0x005E
    170 #define VMCB_EXITCODE_EXCP31		0x005F
    171 #define VMCB_EXITCODE_INTR		0x0060
    172 #define VMCB_EXITCODE_NMI		0x0061
    173 #define VMCB_EXITCODE_SMI		0x0062
    174 #define VMCB_EXITCODE_INIT		0x0063
    175 #define VMCB_EXITCODE_VINTR		0x0064
    176 #define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
    177 #define VMCB_EXITCODE_IDTR_READ		0x0066
    178 #define VMCB_EXITCODE_GDTR_READ		0x0067
    179 #define VMCB_EXITCODE_LDTR_READ		0x0068
    180 #define VMCB_EXITCODE_TR_READ		0x0069
    181 #define VMCB_EXITCODE_IDTR_WRITE	0x006A
    182 #define VMCB_EXITCODE_GDTR_WRITE	0x006B
    183 #define VMCB_EXITCODE_LDTR_WRITE	0x006C
    184 #define VMCB_EXITCODE_TR_WRITE		0x006D
    185 #define VMCB_EXITCODE_RDTSC		0x006E
    186 #define VMCB_EXITCODE_RDPMC		0x006F
    187 #define VMCB_EXITCODE_PUSHF		0x0070
    188 #define VMCB_EXITCODE_POPF		0x0071
    189 #define VMCB_EXITCODE_CPUID		0x0072
    190 #define VMCB_EXITCODE_RSM		0x0073
    191 #define VMCB_EXITCODE_IRET		0x0074
    192 #define VMCB_EXITCODE_SWINT		0x0075
    193 #define VMCB_EXITCODE_INVD		0x0076
    194 #define VMCB_EXITCODE_PAUSE		0x0077
    195 #define VMCB_EXITCODE_HLT		0x0078
    196 #define VMCB_EXITCODE_INVLPG		0x0079
    197 #define VMCB_EXITCODE_INVLPGA		0x007A
    198 #define VMCB_EXITCODE_IOIO		0x007B
    199 #define VMCB_EXITCODE_MSR		0x007C
    200 #define VMCB_EXITCODE_TASK_SWITCH	0x007D
    201 #define VMCB_EXITCODE_FERR_FREEZE	0x007E
    202 #define VMCB_EXITCODE_SHUTDOWN		0x007F
    203 #define VMCB_EXITCODE_VMRUN		0x0080
    204 #define VMCB_EXITCODE_VMMCALL		0x0081
    205 #define VMCB_EXITCODE_VMLOAD		0x0082
    206 #define VMCB_EXITCODE_VMSAVE		0x0083
    207 #define VMCB_EXITCODE_STGI		0x0084
    208 #define VMCB_EXITCODE_CLGI		0x0085
    209 #define VMCB_EXITCODE_SKINIT		0x0086
    210 #define VMCB_EXITCODE_RDTSCP		0x0087
    211 #define VMCB_EXITCODE_ICEBP		0x0088
    212 #define VMCB_EXITCODE_WBINVD		0x0089
    213 #define VMCB_EXITCODE_MONITOR		0x008A
    214 #define VMCB_EXITCODE_MWAIT		0x008B
    215 #define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
    216 #define VMCB_EXITCODE_XSETBV		0x008D
    217 #define VMCB_EXITCODE_RDPRU		0x008E
    218 #define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
    219 #define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
    220 #define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
    221 #define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
    222 #define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
    223 #define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
    224 #define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
    225 #define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
    226 #define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
    227 #define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
    228 #define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
    229 #define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
    230 #define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
    231 #define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
    232 #define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
    233 #define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
    234 #define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
    235 #define VMCB_EXITCODE_MCOMMIT		0x00A3
    236 #define VMCB_EXITCODE_NPF		0x0400
    237 #define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
    238 #define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
    239 #define VMCB_EXITCODE_VMGEXIT		0x0403
    240 #define VMCB_EXITCODE_INVALID		-1ULL
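/*
 * A -1 exit code means VMRUN itself failed: the VMCB contained illegal
 * state and the guest was never entered.
 */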
    241 
    242 /* -------------------------------------------------------------------------- */
    243 
    244 struct vmcb_ctrl {
    245 	uint32_t intercept_cr;
    246 #define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
    247 #define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)
    248 
    249 	uint32_t intercept_dr;
    250 #define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
    251 #define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)
    252 
    253 	uint32_t intercept_vec;
    254 #define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)
    255 
    256 	uint32_t intercept_misc1;
    257 #define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
    258 #define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
    259 #define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
    260 #define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
    261 #define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
    262 #define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
    263 #define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
    264 #define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
    265 #define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
    266 #define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
    267 #define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
    268 #define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
    269 #define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
    270 #define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
    271 #define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
    272 #define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
    273 #define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
    274 #define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
    275 #define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
    276 #define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
    277 #define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
    278 #define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
    279 #define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
    280 #define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
    281 #define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
    282 #define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
    283 #define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
    284 #define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
    285 #define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
    286 #define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
    287 #define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
    288 #define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)
    289 
    290 	uint32_t intercept_misc2;
    291 #define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
    292 #define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
    293 #define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
    294 #define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
    295 #define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
    296 #define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
    297 #define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
    298 #define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
    299 #define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
    300 #define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
    301 #define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
    302 #define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(11)
    303 #define VMCB_CTRL_INTERCEPT_MWAIT_ARMED	__BIT(12)
    304 #define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
    305 #define VMCB_CTRL_INTERCEPT_RDPRU	__BIT(14)
    306 #define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
    307 #define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)
    308 
    309 	uint32_t intercept_misc3;
    310 #define VMCB_CTRL_INTERCEPT_MCOMMIT	__BIT(3)
    311 
    312 	uint8_t  rsvd1[36];
    313 	uint16_t pause_filt_thresh;
    314 	uint16_t pause_filt_cnt;
    315 	uint64_t iopm_base_pa;
    316 	uint64_t msrpm_base_pa;
    317 	uint64_t tsc_offset;
    318 	uint32_t guest_asid;
    319 
    320 	uint32_t tlb_ctrl;
    321 #define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
    322 #define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
    323 #define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07
    324 
    325 	uint64_t v;
    326 #define VMCB_CTRL_V_TPR			__BITS(3,0)
    327 #define VMCB_CTRL_V_IRQ			__BIT(8)
    328 #define VMCB_CTRL_V_VGIF		__BIT(9)
    329 #define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
    330 #define VMCB_CTRL_V_IGN_TPR		__BIT(20)
    331 #define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
    332 #define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
    333 #define VMCB_CTRL_V_AVIC_EN		__BIT(31)
    334 #define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)
    335 
    336 	uint64_t intr;
    337 #define VMCB_CTRL_INTR_SHADOW		__BIT(0)
    338 
    339 	uint64_t exitcode;
    340 	uint64_t exitinfo1;
    341 	uint64_t exitinfo2;
    342 
    343 	uint64_t exitintinfo;
    344 #define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
    345 #define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
    346 #define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
    347 #define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
    348 #define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)
    349 
    350 	uint64_t enable1;
    351 #define VMCB_CTRL_ENABLE_NP		__BIT(0)
    352 #define VMCB_CTRL_ENABLE_SEV		__BIT(1)
    353 #define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)
    354 #define VMCB_CTRL_ENABLE_GMET		__BIT(3)
    355 #define VMCB_CTRL_ENABLE_VTE		__BIT(5)
    356 
    357 	uint64_t avic;
    358 #define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)
    359 
    360 	uint64_t ghcb;
    361 
    362 	uint64_t eventinj;
    363 #define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
    364 #define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
    365 #define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
    366 #define VMCB_CTRL_EVENTINJ_V		__BIT(31)
    367 #define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)
    368 
    369 	uint64_t n_cr3;
    370 
    371 	uint64_t enable2;
    372 #define VMCB_CTRL_ENABLE_LBR		__BIT(0)
    373 #define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)
    374 
    375 	uint32_t vmcb_clean;
    376 #define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
    377 #define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
    378 #define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
    379 #define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
    380 #define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
    381 #define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
    382 #define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
    383 #define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
    384 #define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
    385 #define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
    386 #define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
    387 #define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)
    388 
    389 	uint32_t rsvd2;
    390 	uint64_t nrip;
    391 	uint8_t	inst_len;
    392 	uint8_t	inst_bytes[15];
    393 	uint64_t avic_abpp;
    394 	uint64_t rsvd3;
    395 	uint64_t avic_ltp;
    396 
    397 	uint64_t avic_phys;
    398 #define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
    399 #define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)
    400 
    401 	uint64_t rsvd4;
    402 	uint64_t vmcb_ptr;
    403 
    404 	uint8_t	pad[752];
    405 } __packed;
    406 
    407 CTASSERT(sizeof(struct vmcb_ctrl) == 1024);
    408 
    409 struct vmcb_segment {
    410 	uint16_t selector;
    411 	uint16_t attrib;	/* hidden */
    412 	uint32_t limit;		/* hidden */
    413 	uint64_t base;		/* hidden */
    414 } __packed;
    415 
    416 CTASSERT(sizeof(struct vmcb_segment) == 16);
    417 
    418 struct vmcb_state {
    419 	struct   vmcb_segment es;
    420 	struct   vmcb_segment cs;
    421 	struct   vmcb_segment ss;
    422 	struct   vmcb_segment ds;
    423 	struct   vmcb_segment fs;
    424 	struct   vmcb_segment gs;
    425 	struct   vmcb_segment gdt;
    426 	struct   vmcb_segment ldt;
    427 	struct   vmcb_segment idt;
    428 	struct   vmcb_segment tr;
    429 	uint8_t	 rsvd1[43];
    430 	uint8_t	 cpl;
    431 	uint8_t  rsvd2[4];
    432 	uint64_t efer;
    433 	uint8_t	 rsvd3[112];
    434 	uint64_t cr4;
    435 	uint64_t cr3;
    436 	uint64_t cr0;
    437 	uint64_t dr7;
    438 	uint64_t dr6;
    439 	uint64_t rflags;
    440 	uint64_t rip;
    441 	uint8_t	 rsvd4[88];
    442 	uint64_t rsp;
    443 	uint8_t	 rsvd5[24];
    444 	uint64_t rax;
    445 	uint64_t star;
    446 	uint64_t lstar;
    447 	uint64_t cstar;
    448 	uint64_t sfmask;
    449 	uint64_t kernelgsbase;
    450 	uint64_t sysenter_cs;
    451 	uint64_t sysenter_esp;
    452 	uint64_t sysenter_eip;
    453 	uint64_t cr2;
    454 	uint8_t	 rsvd6[32];
    455 	uint64_t g_pat;
    456 	uint64_t dbgctl;
    457 	uint64_t br_from;
    458 	uint64_t br_to;
    459 	uint64_t int_from;
    460 	uint64_t int_to;
    461 	uint8_t	 pad[2408];
    462 } __packed;
    463 
    464 CTASSERT(sizeof(struct vmcb_state) == 0xC00);
    465 
    466 struct vmcb {
    467 	struct vmcb_ctrl ctrl;
    468 	struct vmcb_state state;
    469 } __packed;
    470 
    471 CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
    472 CTASSERT(offsetof(struct vmcb, state) == 0x400);
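/*
 * Per the AMD APM, the VMCB is a single 4KB page: the control area
 * occupies the first 0x400 bytes and the state save area starts at
 * offset 0x400, which is exactly what the two assertions above verify.
 */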
    473 
    474 /* -------------------------------------------------------------------------- */
    475 
    476 static void svm_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
    477 static void svm_vcpu_state_commit(struct nvmm_cpu *);
    478 
    479 struct svm_hsave {
    480 	paddr_t pa;
    481 };
    482 
    483 static struct svm_hsave hsave[MAXCPUS];
    484 
    485 static uint8_t *svm_asidmap __read_mostly;
    486 static uint32_t svm_maxasid __read_mostly;
    487 static kmutex_t svm_asidlock __cacheline_aligned;
    488 
    489 static bool svm_decode_assist __read_mostly;
    490 static uint32_t svm_ctrl_tlb_flush __read_mostly;
    491 
    492 #define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
    493 static uint64_t svm_xcr0_mask __read_mostly;
    494 
    495 #define SVM_NCPUIDS	32
    496 
    497 #define VMCB_NPAGES	1
    498 
    499 #define MSRBM_NPAGES	2
    500 #define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)
    501 
    502 #define IOBM_NPAGES	3
    503 #define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)
    504 
    505 /* Does not include EFER_LMSLE. */
    506 #define EFER_VALID \
    507 	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)
    508 
    509 #define EFER_TLB_FLUSH \
    510 	(EFER_NXE|EFER_LMA|EFER_LME)
    511 #define CR0_TLB_FLUSH \
    512 	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
    513 #define CR4_TLB_FLUSH \
    514 	(CR4_PGE|CR4_PAE|CR4_PSE)
    515 
    516 /* -------------------------------------------------------------------------- */
    517 
    518 struct svm_machdata {
    519 	volatile uint64_t mach_htlb_gen;
    520 };
    521 
    522 static const size_t svm_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
    523 	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
    524 	    sizeof(struct nvmm_vcpu_conf_cpuid),
    525 	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
    526 	    sizeof(struct nvmm_vcpu_conf_tpr)
    527 };
    528 
    529 struct svm_cpudata {
    530 	/* General */
    531 	bool shared_asid;
    532 	bool gtlb_want_flush;
    533 	bool gtsc_want_update;
    534 	uint64_t vcpu_htlb_gen;
    535 
    536 	/* VMCB */
    537 	struct vmcb *vmcb;
    538 	paddr_t vmcb_pa;
    539 
    540 	/* I/O bitmap */
    541 	uint8_t *iobm;
    542 	paddr_t iobm_pa;
    543 
    544 	/* MSR bitmap */
    545 	uint8_t *msrbm;
    546 	paddr_t msrbm_pa;
    547 
    548 	/* Host state */
    549 	uint64_t hxcr0;
    550 	uint64_t star;
    551 	uint64_t lstar;
    552 	uint64_t cstar;
    553 	uint64_t sfmask;
    554 	uint64_t fsbase;
    555 	uint64_t kernelgsbase;
    556 
    557 	/* Intr state */
    558 	bool int_window_exit;
    559 	bool nmi_window_exit;
    560 	bool evt_pending;
    561 
    562 	/* Guest state */
    563 	uint64_t gxcr0;
    564 	uint64_t gprs[NVMM_X64_NGPR];
    565 	uint64_t drs[NVMM_X64_NDR];
    566 	uint64_t gtsc;
    567 	struct xsave_header gfpu __aligned(64);
    568 
    569 	/* VCPU configuration. */
    570 	bool cpuidpresent[SVM_NCPUIDS];
    571 	struct nvmm_vcpu_conf_cpuid cpuid[SVM_NCPUIDS];
    572 };
    573 
    574 static void
    575 svm_vmcb_cache_default(struct vmcb *vmcb)
    576 {
    577 	vmcb->ctrl.vmcb_clean =
    578 	    VMCB_CTRL_VMCB_CLEAN_I |
    579 	    VMCB_CTRL_VMCB_CLEAN_IOPM |
    580 	    VMCB_CTRL_VMCB_CLEAN_ASID |
    581 	    VMCB_CTRL_VMCB_CLEAN_TPR |
    582 	    VMCB_CTRL_VMCB_CLEAN_NP |
    583 	    VMCB_CTRL_VMCB_CLEAN_CR |
    584 	    VMCB_CTRL_VMCB_CLEAN_DR |
    585 	    VMCB_CTRL_VMCB_CLEAN_DT |
    586 	    VMCB_CTRL_VMCB_CLEAN_SEG |
    587 	    VMCB_CTRL_VMCB_CLEAN_CR2 |
    588 	    VMCB_CTRL_VMCB_CLEAN_LBR |
    589 	    VMCB_CTRL_VMCB_CLEAN_AVIC;
    590 }
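/*
 * The VMCB clean bits are a cache hint: a set bit tells the CPU that the
 * corresponding VMCB fields have not been modified since the last VMRUN,
 * so their cached copies may be reused. Marking everything clean after
 * each exit (above) and clearing individual bits whenever we touch the
 * matching fields (below) keeps the hint accurate.
 */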
    591 
    592 static void
    593 svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
    594 {
    595 	if (flags & NVMM_X64_STATE_SEGS) {
    596 		vmcb->ctrl.vmcb_clean &=
    597 		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
    598 	}
    599 	if (flags & NVMM_X64_STATE_CRS) {
    600 		vmcb->ctrl.vmcb_clean &=
    601 		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
    602 		      VMCB_CTRL_VMCB_CLEAN_TPR);
    603 	}
    604 	if (flags & NVMM_X64_STATE_DRS) {
    605 		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
    606 	}
    607 	if (flags & NVMM_X64_STATE_MSRS) {
     608 		/* EFER is covered by the CR clean bit, PAT by the NP clean bit. */
    609 		vmcb->ctrl.vmcb_clean &=
    610 		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
    611 	}
    612 }
    613 
    614 static inline void
    615 svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
    616 {
    617 	vmcb->ctrl.vmcb_clean &= ~flags;
    618 }
    619 
    620 static inline void
    621 svm_vmcb_cache_flush_all(struct vmcb *vmcb)
    622 {
    623 	vmcb->ctrl.vmcb_clean = 0;
    624 }
    625 
    626 #define SVM_EVENT_TYPE_HW_INT	0
    627 #define SVM_EVENT_TYPE_NMI	2
    628 #define SVM_EVENT_TYPE_EXC	3
    629 #define SVM_EVENT_TYPE_SW_INT	4
    630 
    631 static void
    632 svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
    633 {
    634 	struct svm_cpudata *cpudata = vcpu->cpudata;
    635 	struct vmcb *vmcb = cpudata->vmcb;
    636 
    637 	if (nmi) {
    638 		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
    639 		cpudata->nmi_window_exit = true;
    640 	} else {
    641 		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
    642 		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
    643 		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
    644 		cpudata->int_window_exit = true;
    645 	}
    646 
    647 	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
    648 }
    649 
    650 static void
    651 svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
    652 {
    653 	struct svm_cpudata *cpudata = vcpu->cpudata;
    654 	struct vmcb *vmcb = cpudata->vmcb;
    655 
    656 	if (nmi) {
    657 		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
    658 		cpudata->nmi_window_exit = false;
    659 	} else {
    660 		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
    661 		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
    662 		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
    663 		cpudata->int_window_exit = false;
    664 	}
    665 
    666 	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
    667 }
    668 
    669 static inline int
    670 svm_event_has_error(uint8_t vector)
    671 {
    672 	switch (vector) {
    673 	case 8:		/* #DF */
    674 	case 10:	/* #TS */
    675 	case 11:	/* #NP */
    676 	case 12:	/* #SS */
    677 	case 13:	/* #GP */
    678 	case 14:	/* #PF */
    679 	case 17:	/* #AC */
    680 	case 30:	/* #SX */
    681 		return 1;
    682 	default:
    683 		return 0;
    684 	}
    685 }
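/*
 * These are the vectors for which the CPU pushes an error code on the
 * stack; for them, EVENTINJ.EV must be set and the ERRORCODE field
 * filled in, which svm_vcpu_inject() does below.
 */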
    686 
    687 static int
    688 svm_vcpu_inject(struct nvmm_cpu *vcpu)
    689 {
    690 	struct nvmm_comm_page *comm = vcpu->comm;
    691 	struct svm_cpudata *cpudata = vcpu->cpudata;
    692 	struct vmcb *vmcb = cpudata->vmcb;
    693 	u_int evtype;
    694 	uint8_t vector;
    695 	uint64_t error;
    696 	int type = 0, err = 0;
    697 
    698 	evtype = comm->event.type;
    699 	vector = comm->event.vector;
    700 	error = comm->event.u.excp.error;
    701 	__insn_barrier();
    702 
    703 	switch (evtype) {
    704 	case NVMM_VCPU_EVENT_EXCP:
    705 		type = SVM_EVENT_TYPE_EXC;
    706 		if (vector == 2 || vector >= 32)
    707 			return EINVAL;
    708 		if (vector == 3 || vector == 0)
    709 			return EINVAL;
    710 		err = svm_event_has_error(vector);
    711 		break;
    712 	case NVMM_VCPU_EVENT_INTR:
    713 		type = SVM_EVENT_TYPE_HW_INT;
    714 		if (vector == 2) {
    715 			type = SVM_EVENT_TYPE_NMI;
    716 			svm_event_waitexit_enable(vcpu, true);
    717 		}
    718 		err = 0;
    719 		break;
    720 	default:
    721 		return EINVAL;
    722 	}
    723 
    724 	vmcb->ctrl.eventinj =
    725 	    __SHIFTIN((uint64_t)vector, VMCB_CTRL_EVENTINJ_VECTOR) |
    726 	    __SHIFTIN((uint64_t)type, VMCB_CTRL_EVENTINJ_TYPE) |
    727 	    __SHIFTIN((uint64_t)err, VMCB_CTRL_EVENTINJ_EV) |
    728 	    __SHIFTIN((uint64_t)1, VMCB_CTRL_EVENTINJ_V) |
    729 	    __SHIFTIN((uint64_t)error, VMCB_CTRL_EVENTINJ_ERRORCODE);
    730 
    731 	cpudata->evt_pending = true;
    732 
    733 	return 0;
    734 }
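/*
 * The event itself is filled in by the emulator through the comm page.
 * As an illustrative sketch (hypothetical userland code, assuming the
 * usual libnvmm wrappers), injecting hardware interrupt vector 32 would
 * look like:
 *
 *	vcpu->event->type = NVMM_VCPU_EVENT_INTR;
 *	vcpu->event->vector = 32;
 *	nvmm_vcpu_inject(mach, vcpu);
 *
 * The injection is then committed here on the next run of the VCPU.
 */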
    735 
    736 static void
    737 svm_inject_ud(struct nvmm_cpu *vcpu)
    738 {
    739 	struct nvmm_comm_page *comm = vcpu->comm;
    740 	int ret __diagused;
    741 
    742 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
    743 	comm->event.vector = 6;
    744 	comm->event.u.excp.error = 0;
    745 
    746 	ret = svm_vcpu_inject(vcpu);
    747 	KASSERT(ret == 0);
    748 }
    749 
    750 static void
    751 svm_inject_gp(struct nvmm_cpu *vcpu)
    752 {
    753 	struct nvmm_comm_page *comm = vcpu->comm;
    754 	int ret __diagused;
    755 
    756 	comm->event.type = NVMM_VCPU_EVENT_EXCP;
    757 	comm->event.vector = 13;
    758 	comm->event.u.excp.error = 0;
    759 
    760 	ret = svm_vcpu_inject(vcpu);
    761 	KASSERT(ret == 0);
    762 }
    763 
    764 static inline int
    765 svm_vcpu_event_commit(struct nvmm_cpu *vcpu)
    766 {
    767 	if (__predict_true(!vcpu->comm->event_commit)) {
    768 		return 0;
    769 	}
    770 	vcpu->comm->event_commit = false;
    771 	return svm_vcpu_inject(vcpu);
    772 }
    773 
    774 static inline void
    775 svm_inkernel_advance(struct vmcb *vmcb)
    776 {
     777 	/*
     778 	 * Maybe we should also apply single-stepping and debug exceptions
     779 	 * here. This matters for guest ring-3 code, which can execute
     780 	 * 'cpuid' under a debugger.
     781 	 */
    782 	vmcb->state.rip = vmcb->ctrl.nrip;
    783 	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
    784 }
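/*
 * nRIP is the "next sequential RIP" that the CPU saves on #VMEXIT when
 * decode assist is available, i.e. the address of the instruction
 * following the intercepted one. Loading it into RIP completes the
 * instruction; and since an interrupt shadow only covers the single
 * instruction after STI/MOV-SS, completing an instruction also
 * terminates any pending shadow, hence the second line.
 */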
    785 
    786 #define SVM_CPUID_MAX_HYPERVISOR	0x40000000
    787 
    788 static void
    789 svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
    790 {
    791 	struct svm_cpudata *cpudata = vcpu->cpudata;
    792 	uint64_t cr4;
    793 
    794 	switch (eax) {
    795 	case 0x00000001:
    796 		cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;
    797 
    798 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
    799 		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
    800 		    CPUID_LOCAL_APIC_ID);
    801 
    802 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
    803 		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
    804 
    805 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;
    806 
    807 		/* CPUID2_OSXSAVE depends on CR4. */
    808 		cr4 = cpudata->vmcb->state.cr4;
    809 		if (!(cr4 & CR4_OSXSAVE)) {
    810 			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
    811 		}
    812 		break;
    813 	case 0x00000002: /* Empty */
    814 	case 0x00000003: /* Empty */
    815 	case 0x00000004: /* Empty */
    816 	case 0x00000005: /* Monitor/MWait */
    817 	case 0x00000006: /* Power Management Related Features */
    818 		cpudata->vmcb->state.rax = 0;
    819 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    820 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    821 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    822 		break;
    823 	case 0x00000007: /* Structured Extended Features */
    824 		cpudata->vmcb->state.rax &= nvmm_cpuid_00000007.eax;
    825 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
    826 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
    827 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
    828 		break;
    829 	case 0x00000008: /* Empty */
    830 	case 0x00000009: /* Empty */
    831 	case 0x0000000A: /* Empty */
    832 	case 0x0000000B: /* Empty */
    833 	case 0x0000000C: /* Empty */
    834 		cpudata->vmcb->state.rax = 0;
    835 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    836 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    837 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    838 		break;
    839 	case 0x0000000D: /* Processor Extended State Enumeration */
    840 		if (svm_xcr0_mask == 0) {
    841 			break;
    842 		}
    843 		switch (ecx) {
    844 		case 0:
    845 			cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
    846 			if (cpudata->gxcr0 & XCR0_SSE) {
    847 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
    848 			} else {
    849 				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
    850 			}
    851 			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
    852 			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
    853 			cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
    854 			break;
    855 		case 1:
    856 			cpudata->vmcb->state.rax &=
    857 			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
    858 			     CPUID_PES1_XGETBV);
    859 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    860 			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    861 			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    862 			break;
    863 		default:
    864 			cpudata->vmcb->state.rax = 0;
    865 			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    866 			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    867 			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    868 			break;
    869 		}
    870 		break;
    871 
    872 	case 0x40000000: /* Hypervisor Information */
    873 		cpudata->vmcb->state.rax = SVM_CPUID_MAX_HYPERVISOR;
    874 		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
    875 		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
    876 		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
    877 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
    878 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
    879 		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
    880 		break;
    881 
    882 	case 0x80000001:
    883 		cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax;
    884 		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
    885 		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
    886 		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
    887 		break;
    888 	default:
    889 		break;
    890 	}
    891 }
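/*
 * Leaf 0x40000000 follows the de-facto hypervisor CPUID convention:
 * EAX returns the highest hypervisor leaf, and EBX:ECX:EDX carry a
 * 12-byte vendor signature, here "___ NVMM ___".
 */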
    892 
    893 static void
    894 svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason)
    895 {
    896 	exit->u.insn.npc = vmcb->ctrl.nrip;
    897 	exit->reason = reason;
    898 }
    899 
    900 static void
    901 svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    902     struct nvmm_vcpu_exit *exit)
    903 {
    904 	struct svm_cpudata *cpudata = vcpu->cpudata;
    905 	struct nvmm_vcpu_conf_cpuid *cpuid;
    906 	uint64_t eax, ecx;
    907 	u_int descs[4];
    908 	size_t i;
    909 
    910 	eax = cpudata->vmcb->state.rax;
    911 	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
    912 	x86_cpuid2(eax, ecx, descs);
    913 
    914 	cpudata->vmcb->state.rax = descs[0];
    915 	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
    916 	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
    917 	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
    918 
    919 	svm_inkernel_handle_cpuid(vcpu, eax, ecx);
    920 
    921 	for (i = 0; i < SVM_NCPUIDS; i++) {
    922 		if (!cpudata->cpuidpresent[i]) {
    923 			continue;
    924 		}
    925 		cpuid = &cpudata->cpuid[i];
    926 		if (cpuid->leaf != eax) {
    927 			continue;
    928 		}
    929 
    930 		if (cpuid->exit) {
    931 			svm_exit_insn(cpudata->vmcb, exit, NVMM_VCPU_EXIT_CPUID);
    932 			return;
    933 		}
    934 		KASSERT(cpuid->mask);
    935 
    936 		/* del */
    937 		cpudata->vmcb->state.rax &= ~cpuid->u.mask.del.eax;
    938 		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
    939 		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
    940 		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;
    941 
    942 		/* set */
    943 		cpudata->vmcb->state.rax |= cpuid->u.mask.set.eax;
    944 		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
    945 		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
    946 		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;
    947 
    948 		break;
    949 	}
    950 
    951 	svm_inkernel_advance(cpudata->vmcb);
    952 	exit->reason = NVMM_VCPU_EXIT_NONE;
    953 }
    954 
    955 static void
    956 svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    957     struct nvmm_vcpu_exit *exit)
    958 {
    959 	struct svm_cpudata *cpudata = vcpu->cpudata;
    960 	struct vmcb *vmcb = cpudata->vmcb;
    961 
    962 	if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) {
    963 		svm_event_waitexit_disable(vcpu, false);
    964 	}
    965 
    966 	svm_inkernel_advance(cpudata->vmcb);
    967 	exit->reason = NVMM_VCPU_EXIT_HALTED;
    968 }
    969 
    970 #define SVM_EXIT_IO_PORT	__BITS(31,16)
    971 #define SVM_EXIT_IO_SEG		__BITS(12,10)
    972 #define SVM_EXIT_IO_A64		__BIT(9)
    973 #define SVM_EXIT_IO_A32		__BIT(8)
    974 #define SVM_EXIT_IO_A16		__BIT(7)
    975 #define SVM_EXIT_IO_SZ32	__BIT(6)
    976 #define SVM_EXIT_IO_SZ16	__BIT(5)
    977 #define SVM_EXIT_IO_SZ8		__BIT(4)
    978 #define SVM_EXIT_IO_REP		__BIT(3)
    979 #define SVM_EXIT_IO_STR		__BIT(2)
    980 #define SVM_EXIT_IO_IN		__BIT(0)
    981 
    982 static void
    983 svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    984     struct nvmm_vcpu_exit *exit)
    985 {
    986 	struct svm_cpudata *cpudata = vcpu->cpudata;
    987 	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
    988 	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;
    989 
    990 	exit->reason = NVMM_VCPU_EXIT_IO;
    991 
    992 	exit->u.io.in = (info & SVM_EXIT_IO_IN) != 0;
    993 	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);
    994 
    995 	if (svm_decode_assist) {
    996 		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
    997 		exit->u.io.seg = __SHIFTOUT(info, SVM_EXIT_IO_SEG);
    998 	} else {
    999 		exit->u.io.seg = -1;
   1000 	}
   1001 
   1002 	if (info & SVM_EXIT_IO_A64) {
   1003 		exit->u.io.address_size = 8;
   1004 	} else if (info & SVM_EXIT_IO_A32) {
   1005 		exit->u.io.address_size = 4;
   1006 	} else if (info & SVM_EXIT_IO_A16) {
   1007 		exit->u.io.address_size = 2;
   1008 	}
   1009 
   1010 	if (info & SVM_EXIT_IO_SZ32) {
   1011 		exit->u.io.operand_size = 4;
   1012 	} else if (info & SVM_EXIT_IO_SZ16) {
   1013 		exit->u.io.operand_size = 2;
   1014 	} else if (info & SVM_EXIT_IO_SZ8) {
   1015 		exit->u.io.operand_size = 1;
   1016 	}
   1017 
   1018 	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
   1019 	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
   1020 	exit->u.io.npc = nextpc;
   1021 
   1022 	svm_vcpu_state_provide(vcpu,
   1023 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1024 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1025 }
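/*
 * EXITINFO1 encodes the decoded I/O instruction as laid out above, and
 * EXITINFO2 holds the rIP of the following instruction. For example, a
 * guest executing 'out %al,%dx' with %dx = 0x3F8 produces an EXITINFO1
 * with SVM_EXIT_IO_IN clear, SVM_EXIT_IO_SZ8 set, and 0x3F8 in the
 * SVM_EXIT_IO_PORT field.
 */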
   1026 
   1027 static const uint64_t msr_ignore_list[] = {
   1028 	0xc0010055, /* MSR_CMPHALT */
   1029 	MSR_DE_CFG,
   1030 	MSR_IC_CFG,
   1031 	MSR_UCODE_AMD_PATCHLEVEL
   1032 };
   1033 
   1034 static bool
   1035 svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1036     struct nvmm_vcpu_exit *exit)
   1037 {
   1038 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1039 	struct vmcb *vmcb = cpudata->vmcb;
   1040 	uint64_t val;
   1041 	size_t i;
   1042 
   1043 	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
   1044 		if (exit->u.rdmsr.msr == MSR_NB_CFG) {
   1045 			val = NB_CFG_INITAPICCPUIDLO;
   1046 			vmcb->state.rax = (val & 0xFFFFFFFF);
   1047 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1048 			goto handled;
   1049 		}
   1050 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1051 			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
   1052 				continue;
   1053 			val = 0;
   1054 			vmcb->state.rax = (val & 0xFFFFFFFF);
   1055 			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
   1056 			goto handled;
   1057 		}
   1058 	} else {
   1059 		if (exit->u.wrmsr.msr == MSR_EFER) {
   1060 			if (__predict_false(exit->u.wrmsr.val & ~EFER_VALID)) {
   1061 				goto error;
   1062 			}
   1063 			if ((vmcb->state.efer ^ exit->u.wrmsr.val) &
   1064 			     EFER_TLB_FLUSH) {
   1065 				cpudata->gtlb_want_flush = true;
   1066 			}
   1067 			vmcb->state.efer = exit->u.wrmsr.val | EFER_SVME;
   1068 			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
   1069 			goto handled;
   1070 		}
   1071 		if (exit->u.wrmsr.msr == MSR_TSC) {
   1072 			cpudata->gtsc = exit->u.wrmsr.val;
   1073 			cpudata->gtsc_want_update = true;
   1074 			goto handled;
   1075 		}
   1076 		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
   1077 			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
   1078 				continue;
   1079 			goto handled;
   1080 		}
   1081 	}
   1082 
   1083 	return false;
   1084 
   1085 handled:
   1086 	svm_inkernel_advance(cpudata->vmcb);
   1087 	return true;
   1088 
   1089 error:
   1090 	svm_inject_gp(vcpu);
   1091 	return true;
   1092 }
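/*
 * Note the forced EFER_SVME above: the guest EFER in the VMCB must keep
 * SVME set, otherwise the next VMRUN fails its consistency checks.
 * Writes that toggle paging-related EFER bits additionally require a
 * guest TLB flush, tracked via gtlb_want_flush.
 */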
   1093 
   1094 static inline void
   1095 svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1096     struct nvmm_vcpu_exit *exit)
   1097 {
   1098 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1099 
   1100 	exit->reason = NVMM_VCPU_EXIT_RDMSR;
   1101 	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1102 	exit->u.rdmsr.npc = cpudata->vmcb->ctrl.nrip;
   1103 
   1104 	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
   1105 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1106 		return;
   1107 	}
   1108 
   1109 	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1110 }
   1111 
   1112 static inline void
   1113 svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1114     struct nvmm_vcpu_exit *exit)
   1115 {
   1116 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1117 	uint64_t rdx, rax;
   1118 
   1119 	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
   1120 	rax = cpudata->vmcb->state.rax;
   1121 
   1122 	exit->reason = NVMM_VCPU_EXIT_WRMSR;
   1123 	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
   1124 	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
   1125 	exit->u.wrmsr.npc = cpudata->vmcb->ctrl.nrip;
   1126 
   1127 	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
   1128 		exit->reason = NVMM_VCPU_EXIT_NONE;
   1129 		return;
   1130 	}
   1131 
   1132 	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
   1133 }
   1134 
   1135 static void
   1136 svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1137     struct nvmm_vcpu_exit *exit)
   1138 {
   1139 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1140 	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
   1141 
   1142 	if (info == 0) {
   1143 		svm_exit_rdmsr(mach, vcpu, exit);
   1144 	} else {
   1145 		svm_exit_wrmsr(mach, vcpu, exit);
   1146 	}
   1147 }
   1148 
   1149 static void
   1150 svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1151     struct nvmm_vcpu_exit *exit)
   1152 {
   1153 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1154 	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;
   1155 
   1156 	exit->reason = NVMM_VCPU_EXIT_MEMORY;
   1157 	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
   1158 		exit->u.mem.prot = PROT_WRITE;
   1159 	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
   1160 		exit->u.mem.prot = PROT_EXEC;
   1161 	else
   1162 		exit->u.mem.prot = PROT_READ;
   1163 	exit->u.mem.gpa = gpa;
   1164 	exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
   1165 	memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
   1166 	    sizeof(exit->u.mem.inst_bytes));
   1167 
   1168 	svm_vcpu_state_provide(vcpu,
   1169 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
   1170 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
   1171 }
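/*
 * For nested page faults, EXITINFO1 contains a page-fault-style error
 * code (hence the PGEX_* tests above) and EXITINFO2 the faulting guest
 * physical address. When decode assist is available the CPU also
 * provides the raw instruction bytes, saving the emulator a guest
 * memory fetch.
 */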
   1172 
   1173 static void
   1174 svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1175     struct nvmm_vcpu_exit *exit)
   1176 {
   1177 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1178 	struct vmcb *vmcb = cpudata->vmcb;
   1179 	uint64_t val;
   1180 
   1181 	exit->reason = NVMM_VCPU_EXIT_NONE;
   1182 
   1183 	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
   1184 	    (vmcb->state.rax & 0xFFFFFFFF);
   1185 
   1186 	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
   1187 		goto error;
   1188 	} else if (__predict_false(vmcb->state.cpl != 0)) {
   1189 		goto error;
   1190 	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
   1191 		goto error;
   1192 	} else if (__predict_false((val & XCR0_X87) == 0)) {
   1193 		goto error;
   1194 	}
   1195 
   1196 	cpudata->gxcr0 = val;
   1197 	if (svm_xcr0_mask != 0) {
   1198 		wrxcr(0, cpudata->gxcr0);
   1199 	}
   1200 
   1201 	svm_inkernel_advance(cpudata->vmcb);
   1202 	return;
   1203 
   1204 error:
   1205 	svm_inject_gp(vcpu);
   1206 }
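/*
 * The checks above mirror the architectural #GP(0) conditions of
 * XSETBV: non-zero XCR index in %ecx, CPL > 0, an attempt to set bits
 * outside the supported mask, and clearing the always-one x87 bit.
 */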
   1207 
   1208 static void
   1209 svm_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
   1210 {
   1211 	exit->u.inv.hwcode = code;
   1212 	exit->reason = NVMM_VCPU_EXIT_INVALID;
   1213 }
   1214 
   1215 /* -------------------------------------------------------------------------- */
   1216 
   1217 static void
   1218 svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
   1219 {
   1220 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1221 
   1222 	fpu_kern_enter();
   1223 	fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);
   1224 
   1225 	if (svm_xcr0_mask != 0) {
   1226 		cpudata->hxcr0 = rdxcr(0);
   1227 		wrxcr(0, cpudata->gxcr0);
   1228 	}
   1229 }
   1230 
   1231 static void
   1232 svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
   1233 {
   1234 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1235 
   1236 	if (svm_xcr0_mask != 0) {
   1237 		cpudata->gxcr0 = rdxcr(0);
   1238 		wrxcr(0, cpudata->hxcr0);
   1239 	}
   1240 
   1241 	fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
   1242 	fpu_kern_leave();
   1243 }
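/*
 * fpu_kern_enter() grants the kernel exclusive use of the FPU; the
 * guest context, including the guest XCR0, then replaces the host
 * context for the duration of the run and is saved back on the way out.
 */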
   1244 
   1245 static void
   1246 svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
   1247 {
   1248 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1249 
   1250 	x86_dbregs_save(curlwp);
   1251 
   1252 	ldr7(0);
   1253 
   1254 	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
   1255 	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
   1256 	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
   1257 	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
   1258 }
   1259 
   1260 static void
   1261 svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
   1262 {
   1263 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1264 
   1265 	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
   1266 	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
   1267 	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
   1268 	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
   1269 
   1270 	x86_dbregs_restore(curlwp);
   1271 }
   1272 
   1273 static void
   1274 svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
   1275 {
   1276 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1277 
   1278 	cpudata->fsbase = rdmsr(MSR_FSBASE);
   1279 	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
   1280 }
   1281 
   1282 static void
   1283 svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
   1284 {
   1285 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1286 
   1287 	wrmsr(MSR_STAR, cpudata->star);
   1288 	wrmsr(MSR_LSTAR, cpudata->lstar);
   1289 	wrmsr(MSR_CSTAR, cpudata->cstar);
   1290 	wrmsr(MSR_SFMASK, cpudata->sfmask);
   1291 	wrmsr(MSR_FSBASE, cpudata->fsbase);
   1292 	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
   1293 }
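/*
 * The guest state loaded around VMRUN (via VMLOAD, in the assembly
 * svm_vmrun(), not shown in this excerpt) clobbers the syscall MSRs, so
 * the host copies kept in cpudata (captured at VCPU creation, also
 * outside this excerpt) are written back after each run. FSBASE and
 * KERNELGSBASE change at runtime and are therefore re-read before every
 * entry.
 */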
   1294 
   1295 /* -------------------------------------------------------------------------- */
   1296 
   1297 static inline void
   1298 svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1299 {
   1300 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1301 
   1302 	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
   1303 		cpudata->gtlb_want_flush = true;
   1304 	}
   1305 }
   1306 
   1307 static inline void
   1308 svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
   1309 {
   1310 	/*
   1311 	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
   1312 	 * executing on this hCPU and the hTLB already got flushed, or it
   1313 	 * was executing on another hCPU in which case the catchup is done
   1314 	 * in svm_gtlb_catchup().
   1315 	 */
   1316 }
   1317 
   1318 static inline uint64_t
   1319 svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
   1320 {
   1321 	struct vmcb *vmcb = cpudata->vmcb;
   1322 	uint64_t machgen;
   1323 
   1324 	machgen = machdata->mach_htlb_gen;
   1325 	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
   1326 		return machgen;
   1327 	}
   1328 
   1329 	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
   1330 	return machgen;
   1331 }
   1332 
   1333 static inline void
   1334 svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
   1335 {
   1336 	struct vmcb *vmcb = cpudata->vmcb;
   1337 
   1338 	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
   1339 		cpudata->vcpu_htlb_gen = machgen;
   1340 	}
   1341 }
   1342 
   1343 static inline void
   1344 svm_exit_evt(struct svm_cpudata *cpudata, struct vmcb *vmcb)
   1345 {
   1346 	cpudata->evt_pending = false;
   1347 
   1348 	if (__predict_false(vmcb->ctrl.exitintinfo & VMCB_CTRL_EXITINTINFO_V)) {
   1349 		vmcb->ctrl.eventinj = vmcb->ctrl.exitintinfo;
   1350 		cpudata->evt_pending = true;
   1351 	}
   1352 }
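/*
 * EXITINTINFO describes an event whose delivery was interrupted by the
 * #VMEXIT (e.g. an intercept taken while an interrupt was being
 * injected). Copying it back into EVENTINJ re-injects the event on the
 * next VMRUN so that it is not lost.
 */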
   1353 
   1354 static int
   1355 svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
   1356     struct nvmm_vcpu_exit *exit)
   1357 {
   1358 	struct nvmm_comm_page *comm = vcpu->comm;
   1359 	struct svm_machdata *machdata = mach->machdata;
   1360 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1361 	struct vmcb *vmcb = cpudata->vmcb;
   1362 	uint64_t machgen;
   1363 	int hcpu;
   1364 
   1365 	if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) {
   1366 		return EINVAL;
   1367 	}
   1368 	svm_vcpu_state_commit(vcpu);
   1369 	comm->state_cached = 0;
   1370 
   1371 	kpreempt_disable();
   1372 	hcpu = cpu_number();
   1373 
   1374 	svm_gtlb_catchup(vcpu, hcpu);
   1375 	svm_htlb_catchup(vcpu, hcpu);
   1376 
   1377 	if (vcpu->hcpu_last != hcpu) {
   1378 		svm_vmcb_cache_flush_all(vmcb);
   1379 		cpudata->gtsc_want_update = true;
   1380 	}
   1381 
   1382 	svm_vcpu_guest_dbregs_enter(vcpu);
   1383 	svm_vcpu_guest_misc_enter(vcpu);
   1384 	svm_vcpu_guest_fpu_enter(vcpu);
   1385 
   1386 	while (1) {
   1387 		if (cpudata->gtlb_want_flush) {
   1388 			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
   1389 		} else {
   1390 			vmcb->ctrl.tlb_ctrl = 0;
   1391 		}
   1392 
   1393 		if (__predict_false(cpudata->gtsc_want_update)) {
   1394 			vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
   1395 			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
   1396 		}
   1397 
   1398 		svm_clgi();
   1399 		machgen = svm_htlb_flush(machdata, cpudata);
   1400 		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
   1401 		svm_htlb_flush_ack(cpudata, machgen);
   1402 		svm_stgi();
   1403 
   1404 		svm_vmcb_cache_default(vmcb);
   1405 
   1406 		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
   1407 			cpudata->gtlb_want_flush = false;
   1408 			cpudata->gtsc_want_update = false;
   1409 			vcpu->hcpu_last = hcpu;
   1410 		}
   1411 		svm_exit_evt(cpudata, vmcb);
   1412 
   1413 		switch (vmcb->ctrl.exitcode) {
   1414 		case VMCB_EXITCODE_INTR:
   1415 		case VMCB_EXITCODE_NMI:
   1416 			exit->reason = NVMM_VCPU_EXIT_NONE;
   1417 			break;
   1418 		case VMCB_EXITCODE_VINTR:
   1419 			svm_event_waitexit_disable(vcpu, false);
   1420 			exit->reason = NVMM_VCPU_EXIT_INT_READY;
   1421 			break;
   1422 		case VMCB_EXITCODE_IRET:
   1423 			svm_event_waitexit_disable(vcpu, true);
   1424 			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
   1425 			break;
   1426 		case VMCB_EXITCODE_CPUID:
   1427 			svm_exit_cpuid(mach, vcpu, exit);
   1428 			break;
   1429 		case VMCB_EXITCODE_HLT:
   1430 			svm_exit_hlt(mach, vcpu, exit);
   1431 			break;
   1432 		case VMCB_EXITCODE_IOIO:
   1433 			svm_exit_io(mach, vcpu, exit);
   1434 			break;
   1435 		case VMCB_EXITCODE_MSR:
   1436 			svm_exit_msr(mach, vcpu, exit);
   1437 			break;
   1438 		case VMCB_EXITCODE_SHUTDOWN:
   1439 			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
   1440 			break;
   1441 		case VMCB_EXITCODE_RDPMC:
   1442 		case VMCB_EXITCODE_RSM:
   1443 		case VMCB_EXITCODE_INVLPGA:
   1444 		case VMCB_EXITCODE_VMRUN:
   1445 		case VMCB_EXITCODE_VMMCALL:
   1446 		case VMCB_EXITCODE_VMLOAD:
   1447 		case VMCB_EXITCODE_VMSAVE:
   1448 		case VMCB_EXITCODE_STGI:
   1449 		case VMCB_EXITCODE_CLGI:
   1450 		case VMCB_EXITCODE_SKINIT:
   1451 		case VMCB_EXITCODE_RDTSCP:
   1452 			svm_inject_ud(vcpu);
   1453 			exit->reason = NVMM_VCPU_EXIT_NONE;
   1454 			break;
   1455 		case VMCB_EXITCODE_MONITOR:
   1456 			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MONITOR);
   1457 			break;
   1458 		case VMCB_EXITCODE_MWAIT:
   1459 		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
   1460 			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MWAIT);
   1461 			break;
   1462 		case VMCB_EXITCODE_XSETBV:
   1463 			svm_exit_xsetbv(mach, vcpu, exit);
   1464 			break;
   1465 		case VMCB_EXITCODE_NPF:
   1466 			svm_exit_npf(mach, vcpu, exit);
   1467 			break;
   1468 		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
   1469 		default:
   1470 			svm_exit_invalid(exit, vmcb->ctrl.exitcode);
   1471 			break;
   1472 		}
   1473 
   1474 		/* If no reason to return to userland, keep rolling. */
   1475 		if (nvmm_return_needed()) {
   1476 			break;
   1477 		}
   1478 		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
   1479 			break;
   1480 		}
   1481 	}
   1482 
   1483 	cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;
   1484 
   1485 	svm_vcpu_guest_fpu_leave(vcpu);
   1486 	svm_vcpu_guest_misc_leave(vcpu);
   1487 	svm_vcpu_guest_dbregs_leave(vcpu);
   1488 
   1489 	kpreempt_enable();
   1490 
   1491 	exit->exitstate.rflags = vmcb->state.rflags;
   1492 	exit->exitstate.cr8 = __SHIFTOUT(vmcb->ctrl.v, VMCB_CTRL_V_TPR);
   1493 	exit->exitstate.int_shadow =
   1494 	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
   1495 	exit->exitstate.int_window_exiting = cpudata->int_window_exit;
   1496 	exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
   1497 	exit->exitstate.evt_pending = cpudata->evt_pending;
   1498 
   1499 	return 0;
   1500 }
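/*
 * The guest TSC is related to the host TSC by a fixed offset
 * (guest_tsc = host_tsc + tsc_offset), which is why the loop programs
 * tsc_offset = gtsc - rdtsc() before entry and recovers
 * gtsc = rdtsc() + tsc_offset once the loop exits.
 */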
   1501 
   1502 /* -------------------------------------------------------------------------- */
   1503 
   1504 static int
   1505 svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
   1506 {
   1507 	struct pglist pglist;
   1508 	paddr_t _pa;
   1509 	vaddr_t _va;
   1510 	size_t i;
   1511 	int ret;
   1512 
   1513 	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
   1514 	    &pglist, 1, 0);
   1515 	if (ret != 0)
   1516 		return ENOMEM;
   1517 	_pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
   1518 	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
   1519 	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
   1520 	if (_va == 0)
   1521 		goto error;
   1522 
   1523 	for (i = 0; i < npages; i++) {
   1524 		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
   1525 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
   1526 	}
   1527 	pmap_update(pmap_kernel());
   1528 
   1529 	memset((void *)_va, 0, npages * PAGE_SIZE);
   1530 
   1531 	*pa = _pa;
   1532 	*va = _va;
   1533 	return 0;
   1534 
   1535 error:
   1536 	for (i = 0; i < npages; i++) {
   1537 		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
   1538 	}
   1539 	return ENOMEM;
   1540 }
   1541 
   1542 static void
   1543 svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
   1544 {
   1545 	size_t i;
   1546 
   1547 	pmap_kremove(va, npages * PAGE_SIZE);
   1548 	pmap_update(pmap_kernel());
   1549 	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
   1550 	for (i = 0; i < npages; i++) {
   1551 		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
   1552 	}
   1553 }
   1554 
   1555 /* -------------------------------------------------------------------------- */
   1556 
   1557 #define SVM_MSRBM_READ	__BIT(0)
   1558 #define SVM_MSRBM_WRITE	__BIT(1)
   1559 
   1560 static void
   1561 svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
   1562 {
   1563 	uint64_t byte;
   1564 	uint8_t bitoff;
   1565 
   1566 	if (msr < 0x00002000) {
   1567 		/* Range 1 */
   1568 		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
   1569 	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
   1570 		/* Range 2 */
   1571 		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
   1572 	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
   1573 		/* Range 3 */
   1574 		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
   1575 	} else {
   1576 		panic("%s: wrong range", __func__);
   1577 	}
   1578 
   1579 	bitoff = (msr & 0x3) << 1;
   1580 
   1581 	if (read) {
   1582 		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
   1583 	}
   1584 	if (write) {
   1585 		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
   1586 	}
   1587 }
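/*
 * The MSR permission map uses two bits per MSR, in three 2KB chunks
 * covering the ranges 0x00000000-0x00001FFF, 0xC0000000-0xC0001FFF and
 * 0xC0010000-0xC0011FFF; bit 0 of a pair intercepts reads, bit 1
 * intercepts writes, and a clear bit grants direct access. Worked
 * example: for MSR_STAR (0xC0000081), byte = (0x81 >> 2) + 0x800 =
 * 0x820 and bitoff = (0x81 & 3) << 1 = 2, so reads are governed by
 * bit 2 and writes by bit 3 of bitmap[0x820].
 */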
   1588 
   1589 #define SVM_SEG_ATTRIB_TYPE		__BITS(3,0)
   1590 #define SVM_SEG_ATTRIB_S		__BIT(4)
   1591 #define SVM_SEG_ATTRIB_DPL		__BITS(6,5)
   1592 #define SVM_SEG_ATTRIB_P		__BIT(7)
   1593 #define SVM_SEG_ATTRIB_AVL		__BIT(8)
   1594 #define SVM_SEG_ATTRIB_L		__BIT(9)
   1595 #define SVM_SEG_ATTRIB_DEF		__BIT(10)
   1596 #define SVM_SEG_ATTRIB_G		__BIT(11)
   1597 
   1598 static void
   1599 svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
   1600     struct vmcb_segment *vseg)
   1601 {
   1602 	vseg->selector = seg->selector;
   1603 	vseg->attrib =
   1604 	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
   1605 	    __SHIFTIN(seg->attrib.s, SVM_SEG_ATTRIB_S) |
   1606 	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
   1607 	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
   1608 	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
   1609 	    __SHIFTIN(seg->attrib.l, SVM_SEG_ATTRIB_L) |
   1610 	    __SHIFTIN(seg->attrib.def, SVM_SEG_ATTRIB_DEF) |
   1611 	    __SHIFTIN(seg->attrib.g, SVM_SEG_ATTRIB_G);
   1612 	vseg->limit = seg->limit;
   1613 	vseg->base = seg->base;
   1614 }
   1615 
   1616 static void
   1617 svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
   1618 {
   1619 	seg->selector = vseg->selector;
   1620 	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
   1621 	seg->attrib.s = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_S);
   1622 	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
   1623 	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
   1624 	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
   1625 	seg->attrib.l = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_L);
   1626 	seg->attrib.def = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF);
   1627 	seg->attrib.g = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_G);
   1628 	seg->limit = vseg->limit;
   1629 	seg->base = vseg->base;
   1630 }
   1631 
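/*
 * Decide whether the state being installed requires a guest TLB flush:
 * a change of CR3, or of the paging-related bits of CR0, CR4 or EFER
 * (the *_TLB_FLUSH masks). Translations are cached per-ASID, so VMRUN
 * alone does not invalidate the stale entries for us.
 */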
   1632 static inline bool
   1633 svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
   1634     uint64_t flags)
   1635 {
   1636 	if (flags & NVMM_X64_STATE_CRS) {
   1637 		if ((vmcb->state.cr0 ^
   1638 		     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
   1639 			return true;
   1640 		}
   1641 		if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
   1642 			return true;
   1643 		}
   1644 		if ((vmcb->state.cr4 ^
   1645 		     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
   1646 			return true;
   1647 		}
   1648 	}
   1649 
   1650 	if (flags & NVMM_X64_STATE_MSRS) {
   1651 		if ((vmcb->state.efer ^
   1652 		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
   1653 			return true;
   1654 		}
   1655 	}
   1656 
   1657 	return false;
   1658 }
   1659 
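/*
 * Push into the VMCB and the cpudata the fields of the comm page state
 * selected in state_wanted, scheduling a guest TLB flush if the new
 * CR/EFER values demand one.
 */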
   1660 static void
   1661 svm_vcpu_setstate(struct nvmm_cpu *vcpu)
   1662 {
   1663 	struct nvmm_comm_page *comm = vcpu->comm;
   1664 	const struct nvmm_x64_state *state = &comm->state;
   1665 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1666 	struct vmcb *vmcb = cpudata->vmcb;
   1667 	struct fxsave *fpustate;
   1668 	uint64_t flags;
   1669 
   1670 	flags = comm->state_wanted;
   1671 
   1672 	if (svm_state_tlb_flush(vmcb, state, flags)) {
   1673 		cpudata->gtlb_want_flush = true;
   1674 	}
   1675 
   1676 	if (flags & NVMM_X64_STATE_SEGS) {
   1677 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
   1678 		    &vmcb->state.cs);
   1679 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
   1680 		    &vmcb->state.ds);
   1681 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
   1682 		    &vmcb->state.es);
   1683 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
   1684 		    &vmcb->state.fs);
   1685 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
   1686 		    &vmcb->state.gs);
   1687 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
   1688 		    &vmcb->state.ss);
   1689 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
   1690 		    &vmcb->state.gdt);
   1691 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
   1692 		    &vmcb->state.idt);
   1693 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
   1694 		    &vmcb->state.ldt);
   1695 		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
   1696 		    &vmcb->state.tr);
   1697 
   1698 		vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
   1699 	}
   1700 
   1701 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   1702 	if (flags & NVMM_X64_STATE_GPRS) {
   1703 		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
   1704 
   1705 		vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
   1706 		vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
   1707 		vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
   1708 		vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
   1709 	}
   1710 
   1711 	if (flags & NVMM_X64_STATE_CRS) {
   1712 		vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
   1713 		vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
   1714 		vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
   1715 		vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
   1716 
   1717 		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
   1718 		vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
   1719 		    VMCB_CTRL_V_TPR);
   1720 
   1721 		if (svm_xcr0_mask != 0) {
   1722 			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
   1723 			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
   1724 			cpudata->gxcr0 &= svm_xcr0_mask;
   1725 			cpudata->gxcr0 |= XCR0_X87;
   1726 		}
   1727 	}
   1728 
   1729 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   1730 	if (flags & NVMM_X64_STATE_DRS) {
   1731 		memcpy(cpudata->drs, state->drs, sizeof(state->drs));
   1732 
   1733 		vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
   1734 		vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
   1735 	}
   1736 
   1737 	if (flags & NVMM_X64_STATE_MSRS) {
   1738 		/*
   1739 		 * EFER_SVME is mandatory.
   1740 		 */
   1741 		vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
   1742 		vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
   1743 		vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
   1744 		vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
   1745 		vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
   1746 		vmcb->state.kernelgsbase =
   1747 		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
   1748 		vmcb->state.sysenter_cs =
   1749 		    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
   1750 		vmcb->state.sysenter_esp =
   1751 		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
   1752 		vmcb->state.sysenter_eip =
   1753 		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
   1754 		vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];
   1755 
   1756 		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
   1757 		cpudata->gtsc_want_update = true;
   1758 	}
   1759 
   1760 	if (flags & NVMM_X64_STATE_INTR) {
   1761 		if (state->intr.int_shadow) {
   1762 			vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
   1763 		} else {
   1764 			vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
   1765 		}
   1766 
   1767 		if (state->intr.int_window_exiting) {
   1768 			svm_event_waitexit_enable(vcpu, false);
   1769 		} else {
   1770 			svm_event_waitexit_disable(vcpu, false);
   1771 		}
   1772 
   1773 		if (state->intr.nmi_window_exiting) {
   1774 			svm_event_waitexit_enable(vcpu, true);
   1775 		} else {
   1776 			svm_event_waitexit_disable(vcpu, true);
   1777 		}
   1778 	}
   1779 
   1780 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   1781 	if (flags & NVMM_X64_STATE_FPU) {
   1782 		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
   1783 		    sizeof(state->fpu));
   1784 
   1785 		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
   1786 		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
   1787 		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
   1788 
   1789 		if (svm_xcr0_mask != 0) {
   1790 			/* Reset XSTATE_BV, to force a reload. */
   1791 			cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
   1792 		}
   1793 	}
   1794 
   1795 	svm_vmcb_cache_update(vmcb, flags);
   1796 
   1797 	comm->state_wanted = 0;
   1798 	comm->state_cached |= flags;
   1799 }
   1800 
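/*
 * The converse of svm_vcpu_setstate: fetch the fields selected in
 * state_wanted from the VMCB and the cpudata into the comm page.
 */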
   1801 static void
   1802 svm_vcpu_getstate(struct nvmm_cpu *vcpu)
   1803 {
   1804 	struct nvmm_comm_page *comm = vcpu->comm;
   1805 	struct nvmm_x64_state *state = &comm->state;
   1806 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1807 	struct vmcb *vmcb = cpudata->vmcb;
   1808 	uint64_t flags;
   1809 
   1810 	flags = comm->state_wanted;
   1811 
   1812 	if (flags & NVMM_X64_STATE_SEGS) {
   1813 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
   1814 		    &vmcb->state.cs);
   1815 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
   1816 		    &vmcb->state.ds);
   1817 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
   1818 		    &vmcb->state.es);
   1819 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
   1820 		    &vmcb->state.fs);
   1821 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
   1822 		    &vmcb->state.gs);
   1823 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
   1824 		    &vmcb->state.ss);
   1825 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
   1826 		    &vmcb->state.gdt);
   1827 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
   1828 		    &vmcb->state.idt);
   1829 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
   1830 		    &vmcb->state.ldt);
   1831 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
   1832 		    &vmcb->state.tr);
   1833 
   1834 		state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
   1835 	}
   1836 
   1837 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   1838 	if (flags & NVMM_X64_STATE_GPRS) {
   1839 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   1840 
   1841 		state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
   1842 		state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
   1843 		state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
   1844 		state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
   1845 	}
   1846 
   1847 	if (flags & NVMM_X64_STATE_CRS) {
   1848 		state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
   1849 		state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
   1850 		state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
   1851 		state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
   1852 		state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
   1853 		    VMCB_CTRL_V_TPR);
   1854 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   1855 	}
   1856 
   1857 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   1858 	if (flags & NVMM_X64_STATE_DRS) {
   1859 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   1860 
   1861 		state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
   1862 		state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
   1863 	}
   1864 
   1865 	if (flags & NVMM_X64_STATE_MSRS) {
   1866 		state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
   1867 		state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
   1868 		state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
   1869 		state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
   1870 		state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
   1871 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   1872 		    vmcb->state.kernelgsbase;
   1873 		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
   1874 		    vmcb->state.sysenter_cs;
   1875 		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
   1876 		    vmcb->state.sysenter_esp;
   1877 		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
   1878 		    vmcb->state.sysenter_eip;
   1879 		state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
   1880 		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;
   1881 
    1882 		/* Hide SVME, which we set ourselves in setstate. */
   1883 		state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
   1884 	}
   1885 
   1886 	if (flags & NVMM_X64_STATE_INTR) {
   1887 		state->intr.int_shadow =
   1888 		    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
   1889 		state->intr.int_window_exiting = cpudata->int_window_exit;
   1890 		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
   1891 		state->intr.evt_pending = cpudata->evt_pending;
   1892 	}
   1893 
   1894 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   1895 	if (flags & NVMM_X64_STATE_FPU) {
   1896 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   1897 		    sizeof(state->fpu));
   1898 	}
   1899 
   1900 	comm->state_wanted = 0;
   1901 	comm->state_cached |= flags;
   1902 }
   1903 
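/*
 * Two wrappers: "provide" fetches the given fields into the comm page,
 * typically to describe an exit; "commit" pushes back the fields the
 * emulator marked as modified since the last run.
 */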
   1904 static void
   1905 svm_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
   1906 {
   1907 	vcpu->comm->state_wanted = flags;
   1908 	svm_vcpu_getstate(vcpu);
   1909 }
   1910 
   1911 static void
   1912 svm_vcpu_state_commit(struct nvmm_cpu *vcpu)
   1913 {
   1914 	vcpu->comm->state_wanted = vcpu->comm->state_commit;
   1915 	vcpu->comm->state_commit = 0;
   1916 	svm_vcpu_setstate(vcpu);
   1917 }
   1918 
   1919 /* -------------------------------------------------------------------------- */
   1920 
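/*
 * ASIDs are handed out from a global bitmap. A VCPU normally gets a
 * private ASID; when none is free, it falls back to the last ASID,
 * shared between the overflowing VCPUs and requiring extra TLB care.
 */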
   1921 static void
   1922 svm_asid_alloc(struct nvmm_cpu *vcpu)
   1923 {
   1924 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1925 	struct vmcb *vmcb = cpudata->vmcb;
   1926 	size_t i, oct, bit;
   1927 
   1928 	mutex_enter(&svm_asidlock);
   1929 
   1930 	for (i = 0; i < svm_maxasid; i++) {
   1931 		oct = i / 8;
   1932 		bit = i % 8;
   1933 
   1934 		if (svm_asidmap[oct] & __BIT(bit)) {
   1935 			continue;
   1936 		}
   1937 
   1938 		svm_asidmap[oct] |= __BIT(bit);
   1939 		vmcb->ctrl.guest_asid = i;
   1940 		mutex_exit(&svm_asidlock);
   1941 		return;
   1942 	}
   1943 
   1944 	/*
   1945 	 * No free ASID. Use the last one, which is shared and requires
   1946 	 * special TLB handling.
   1947 	 */
   1948 	cpudata->shared_asid = true;
   1949 	vmcb->ctrl.guest_asid = svm_maxasid - 1;
   1950 	mutex_exit(&svm_asidlock);
   1951 }
   1952 
   1953 static void
   1954 svm_asid_free(struct nvmm_cpu *vcpu)
   1955 {
   1956 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1957 	struct vmcb *vmcb = cpudata->vmcb;
   1958 	size_t oct, bit;
   1959 
   1960 	if (cpudata->shared_asid) {
   1961 		return;
   1962 	}
   1963 
   1964 	oct = vmcb->ctrl.guest_asid / 8;
   1965 	bit = vmcb->ctrl.guest_asid % 8;
   1966 
   1967 	mutex_enter(&svm_asidlock);
   1968 	svm_asidmap[oct] &= ~__BIT(bit);
   1969 	mutex_exit(&svm_asidlock);
   1970 }
   1971 
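/*
 * Initialize a fresh VCPU: choose the intercepts, install the I/O and
 * MSR bitmaps, allocate an ASID, point Nested Paging at the machine's
 * pmap, and load the architectural RESET state via setstate.
 */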
   1972 static void
   1973 svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   1974 {
   1975 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1976 	struct vmcb *vmcb = cpudata->vmcb;
   1977 
   1978 	/* Allow reads/writes of Control Registers. */
   1979 	vmcb->ctrl.intercept_cr = 0;
   1980 
   1981 	/* Allow reads/writes of Debug Registers. */
   1982 	vmcb->ctrl.intercept_dr = 0;
   1983 
   1984 	/* Allow exceptions 0 to 31. */
   1985 	vmcb->ctrl.intercept_vec = 0;
   1986 
   1987 	/*
   1988 	 * Allow:
   1989 	 *  - SMI [smm interrupts]
   1990 	 *  - VINTR [virtual interrupts]
    1991 	 *  - CR0_SPEC [CR0 writes changing fields other than CR0.TS or CR0.MP]
   1992 	 *  - RIDTR [reads of IDTR]
   1993 	 *  - RGDTR [reads of GDTR]
   1994 	 *  - RLDTR [reads of LDTR]
   1995 	 *  - RTR [reads of TR]
   1996 	 *  - WIDTR [writes of IDTR]
   1997 	 *  - WGDTR [writes of GDTR]
   1998 	 *  - WLDTR [writes of LDTR]
   1999 	 *  - WTR [writes of TR]
   2000 	 *  - RDTSC [rdtsc instruction]
   2001 	 *  - PUSHF [pushf instruction]
   2002 	 *  - POPF [popf instruction]
   2003 	 *  - IRET [iret instruction]
   2004 	 *  - INTN [int $n instructions]
   2005 	 *  - INVD [invd instruction]
   2006 	 *  - PAUSE [pause instruction]
    2007 	 *  - INVLPG [invlpg instruction]
   2008 	 *  - TASKSW [task switches]
   2009 	 *
   2010 	 * Intercept the rest below.
   2011 	 */
   2012 	vmcb->ctrl.intercept_misc1 =
   2013 	    VMCB_CTRL_INTERCEPT_INTR |
   2014 	    VMCB_CTRL_INTERCEPT_NMI |
   2015 	    VMCB_CTRL_INTERCEPT_INIT |
   2016 	    VMCB_CTRL_INTERCEPT_RDPMC |
   2017 	    VMCB_CTRL_INTERCEPT_CPUID |
   2018 	    VMCB_CTRL_INTERCEPT_RSM |
   2019 	    VMCB_CTRL_INTERCEPT_HLT |
   2020 	    VMCB_CTRL_INTERCEPT_INVLPGA |
   2021 	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
   2022 	    VMCB_CTRL_INTERCEPT_MSR_PROT |
   2023 	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
   2024 	    VMCB_CTRL_INTERCEPT_SHUTDOWN;
   2025 
   2026 	/*
   2027 	 * Allow:
   2028 	 *  - ICEBP [icebp instruction]
   2029 	 *  - WBINVD [wbinvd instruction]
    2030 	 *  - WCR_SPEC(0..15) [writes of CR0-15, taken after the instruction]
   2031 	 *
   2032 	 * Intercept the rest below.
   2033 	 */
   2034 	vmcb->ctrl.intercept_misc2 =
   2035 	    VMCB_CTRL_INTERCEPT_VMRUN |
   2036 	    VMCB_CTRL_INTERCEPT_VMMCALL |
   2037 	    VMCB_CTRL_INTERCEPT_VMLOAD |
   2038 	    VMCB_CTRL_INTERCEPT_VMSAVE |
   2039 	    VMCB_CTRL_INTERCEPT_STGI |
   2040 	    VMCB_CTRL_INTERCEPT_CLGI |
   2041 	    VMCB_CTRL_INTERCEPT_SKINIT |
   2042 	    VMCB_CTRL_INTERCEPT_RDTSCP |
   2043 	    VMCB_CTRL_INTERCEPT_MONITOR |
   2044 	    VMCB_CTRL_INTERCEPT_MWAIT |
   2045 	    VMCB_CTRL_INTERCEPT_XSETBV;
   2046 
   2047 	/* Intercept all I/O accesses. */
   2048 	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
   2049 	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;
   2050 
   2051 	/* Allow direct access to certain MSRs. */
   2052 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   2053 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
   2054 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   2055 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   2056 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   2057 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   2058 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   2059 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   2060 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   2061 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   2062 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   2063 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   2064 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
   2065 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   2066 	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;
   2067 
   2068 	/* Generate ASID. */
   2069 	svm_asid_alloc(vcpu);
   2070 
   2071 	/* Virtual TPR. */
   2072 	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;
   2073 
   2074 	/* Enable Nested Paging. */
   2075 	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
   2076 	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];
   2077 
   2078 	/* Init XSAVE header. */
   2079 	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
   2080 	cpudata->gfpu.xsh_xcomp_bv = 0;
   2081 
    2082 	/* These MSRs are static on the host; cache them once. */
   2083 	cpudata->star = rdmsr(MSR_STAR);
   2084 	cpudata->lstar = rdmsr(MSR_LSTAR);
   2085 	cpudata->cstar = rdmsr(MSR_CSTAR);
   2086 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   2087 
   2088 	/* Install the RESET state. */
   2089 	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
   2090 	    sizeof(nvmm_x86_reset_state));
   2091 	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
   2092 	vcpu->comm->state_cached = 0;
   2093 	svm_vcpu_setstate(vcpu);
   2094 }
   2095 
   2096 static int
   2097 svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2098 {
   2099 	struct svm_cpudata *cpudata;
   2100 	int error;
   2101 
   2102 	/* Allocate the SVM cpudata. */
   2103 	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
   2104 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   2105 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   2106 	vcpu->cpudata = cpudata;
   2107 
   2108 	/* VMCB */
   2109 	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
   2110 	    VMCB_NPAGES);
   2111 	if (error)
   2112 		goto error;
   2113 
   2114 	/* I/O Bitmap */
   2115 	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
   2116 	    IOBM_NPAGES);
   2117 	if (error)
   2118 		goto error;
   2119 
   2120 	/* MSR Bitmap */
   2121 	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   2122 	    MSRBM_NPAGES);
   2123 	if (error)
   2124 		goto error;
   2125 
   2126 	/* Init the VCPU info. */
   2127 	svm_vcpu_init(mach, vcpu);
   2128 
   2129 	return 0;
   2130 
   2131 error:
   2132 	if (cpudata->vmcb_pa) {
   2133 		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
   2134 		    VMCB_NPAGES);
   2135 	}
   2136 	if (cpudata->iobm_pa) {
   2137 		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
   2138 		    IOBM_NPAGES);
   2139 	}
   2140 	if (cpudata->msrbm_pa) {
   2141 		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   2142 		    MSRBM_NPAGES);
   2143 	}
   2144 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2145 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2146 	return error;
   2147 }
   2148 
   2149 static void
   2150 svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   2151 {
   2152 	struct svm_cpudata *cpudata = vcpu->cpudata;
   2153 
   2154 	svm_asid_free(vcpu);
   2155 
   2156 	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
   2157 	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
   2158 	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2159 
   2160 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2161 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2162 }
   2163 
   2164 /* -------------------------------------------------------------------------- */
   2165 
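/*
 * Install a CPUID configuration for one leaf. With neither 'mask' nor
 * 'exit' set, any existing entry for the leaf is deleted, restoring the
 * default behavior. Otherwise the conf replaces an existing entry for
 * the leaf, or consumes a free slot. 'mask' and 'exit' are exclusive,
 * and a mask may not both set and delete the same bit.
 */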
   2166 static int
   2167 svm_vcpu_configure_cpuid(struct svm_cpudata *cpudata, void *data)
   2168 {
   2169 	struct nvmm_vcpu_conf_cpuid *cpuid = data;
   2170 	size_t i;
   2171 
   2172 	if (__predict_false(cpuid->mask && cpuid->exit)) {
   2173 		return EINVAL;
   2174 	}
   2175 	if (__predict_false(cpuid->mask &&
   2176 	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
   2177 	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
   2178 	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
   2179 	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
   2180 		return EINVAL;
   2181 	}
   2182 
   2183 	/* If unset, delete, to restore the default behavior. */
   2184 	if (!cpuid->mask && !cpuid->exit) {
   2185 		for (i = 0; i < SVM_NCPUIDS; i++) {
   2186 			if (!cpudata->cpuidpresent[i]) {
   2187 				continue;
   2188 			}
   2189 			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2190 				cpudata->cpuidpresent[i] = false;
   2191 			}
   2192 		}
   2193 		return 0;
   2194 	}
   2195 
   2196 	/* If already here, replace. */
   2197 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2198 		if (!cpudata->cpuidpresent[i]) {
   2199 			continue;
   2200 		}
   2201 		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
   2202 			memcpy(&cpudata->cpuid[i], cpuid,
   2203 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2204 			return 0;
   2205 		}
   2206 	}
   2207 
   2208 	/* Not here, insert. */
   2209 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2210 		if (!cpudata->cpuidpresent[i]) {
   2211 			cpudata->cpuidpresent[i] = true;
   2212 			memcpy(&cpudata->cpuid[i], cpuid,
   2213 			    sizeof(struct nvmm_vcpu_conf_cpuid));
   2214 			return 0;
   2215 		}
   2216 	}
   2217 
   2218 	return ENOBUFS;
   2219 }
   2220 
   2221 static int
   2222 svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
   2223 {
   2224 	struct svm_cpudata *cpudata = vcpu->cpudata;
   2225 
   2226 	switch (op) {
   2227 	case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
   2228 		return svm_vcpu_configure_cpuid(cpudata, data);
   2229 	default:
   2230 		return EINVAL;
   2231 	}
   2232 }
   2233 
   2234 /* -------------------------------------------------------------------------- */
   2235 
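/*
 * Called when the mappings of the guest pmap change: bump the machine's
 * hTLB generation and IPI every CPU. VCPUs in guest mode take a #VMEXIT
 * and notice the stale generation before re-entering the guest.
 */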
   2236 static void
   2237 svm_tlb_flush(struct pmap *pm)
   2238 {
   2239 	struct nvmm_machine *mach = pm->pm_data;
   2240 	struct svm_machdata *machdata = mach->machdata;
   2241 
   2242 	atomic_inc_64(&machdata->mach_htlb_gen);
   2243 
   2244 	/* Generates IPIs, which cause #VMEXITs. */
   2245 	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
   2246 }
   2247 
   2248 static void
   2249 svm_machine_create(struct nvmm_machine *mach)
   2250 {
   2251 	struct svm_machdata *machdata;
   2252 
   2253 	/* Fill in pmap info. */
   2254 	mach->vm->vm_map.pmap->pm_data = (void *)mach;
   2255 	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;
   2256 
   2257 	machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
   2258 	mach->machdata = machdata;
   2259 
   2260 	/* Start with an hTLB flush everywhere. */
   2261 	machdata->mach_htlb_gen = 1;
   2262 }
   2263 
   2264 static void
   2265 svm_machine_destroy(struct nvmm_machine *mach)
   2266 {
   2267 	kmem_free(mach->machdata, sizeof(struct svm_machdata));
   2268 }
   2269 
   2270 static int
   2271 svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2272 {
   2273 	panic("%s: impossible", __func__);
   2274 }
   2275 
   2276 /* -------------------------------------------------------------------------- */
   2277 
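/*
 * Identify whether the host can run NVMM-SVM: an AMD CPU with SVM,
 * Nested Paging, next-RIP saving, and SVM not disabled-and-locked by
 * the BIOS.
 */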
   2278 static bool
   2279 svm_ident(void)
   2280 {
   2281 	u_int descs[4];
   2282 	uint64_t msr;
   2283 
   2284 	if (cpu_vendor != CPUVENDOR_AMD) {
   2285 		return false;
   2286 	}
   2287 	if (!(cpu_feature[3] & CPUID_SVM)) {
   2288 		printf("NVMM: SVM not supported\n");
   2289 		return false;
   2290 	}
   2291 
   2292 	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
   2293 		printf("NVMM: CPUID leaf not available\n");
   2294 		return false;
   2295 	}
   2296 	x86_cpuid(0x8000000a, descs);
   2297 
   2298 	/* Want Nested Paging. */
   2299 	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
   2300 		printf("NVMM: SVM-NP not supported\n");
   2301 		return false;
   2302 	}
   2303 
   2304 	/* Want nRIP. */
   2305 	if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
   2306 		printf("NVMM: SVM-NRIPS not supported\n");
   2307 		return false;
   2308 	}
   2309 
   2310 	svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;
   2311 
   2312 	msr = rdmsr(MSR_VMCR);
   2313 	if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
   2314 		printf("NVMM: SVM disabled in BIOS\n");
   2315 		return false;
   2316 	}
   2317 
   2318 	return true;
   2319 }
   2320 
   2321 static void
   2322 svm_init_asid(uint32_t maxasid)
   2323 {
   2324 	size_t i, j, allocsz;
   2325 
   2326 	mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);
   2327 
    2328 	/* Arbitrarily limit, to keep the ASID bitmap small. */
   2329 	maxasid = uimin(maxasid, 8192);
   2330 
   2331 	svm_maxasid = maxasid;
   2332 	allocsz = roundup(maxasid, 8) / 8;
   2333 	svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   2334 
   2335 	/* ASID 0 is reserved for the host. */
   2336 	svm_asidmap[0] |= __BIT(0);
   2337 
   2338 	/* ASID n-1 is special, we share it. */
   2339 	i = (maxasid - 1) / 8;
   2340 	j = (maxasid - 1) % 8;
   2341 	svm_asidmap[i] |= __BIT(j);
   2342 }
   2343 
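/*
 * Cross-call handler: enable or disable SVM on the local CPU, by
 * toggling EFER.SVME and setting or clearing the host save area
 * (MSR_VM_HSAVE_PA).
 */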
   2344 static void
   2345 svm_change_cpu(void *arg1, void *arg2)
   2346 {
   2347 	bool enable = arg1 != NULL;
   2348 	uint64_t msr;
   2349 
   2350 	msr = rdmsr(MSR_VMCR);
   2351 	if (msr & VMCR_SVMED) {
   2352 		wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
   2353 	}
   2354 
   2355 	if (!enable) {
   2356 		wrmsr(MSR_VM_HSAVE_PA, 0);
   2357 	}
   2358 
   2359 	msr = rdmsr(MSR_EFER);
   2360 	if (enable) {
   2361 		msr |= EFER_SVME;
   2362 	} else {
   2363 		msr &= ~EFER_SVME;
   2364 	}
   2365 	wrmsr(MSR_EFER, msr);
   2366 
   2367 	if (enable) {
   2368 		wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
   2369 	}
   2370 }
   2371 
   2372 static void
   2373 svm_init(void)
   2374 {
   2375 	CPU_INFO_ITERATOR cii;
   2376 	struct cpu_info *ci;
   2377 	struct vm_page *pg;
   2378 	u_int descs[4];
   2379 	uint64_t xc;
   2380 
   2381 	x86_cpuid(0x8000000a, descs);
   2382 
    2383 	/* Guest TLB flush command: by-ASID if supported, else flush all. */
   2384 	if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
   2385 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
   2386 	} else {
   2387 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
   2388 	}
   2389 
   2390 	/* Init the ASID. */
   2391 	svm_init_asid(descs[1]);
   2392 
   2393 	/* Init the XCR0 mask. */
   2394 	svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
   2395 
   2396 	memset(hsave, 0, sizeof(hsave));
   2397 	for (CPU_INFO_FOREACH(cii, ci)) {
   2398 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   2399 		hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
   2400 	}
   2401 
   2402 	xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
   2403 	xc_wait(xc);
   2404 }
   2405 
   2406 static void
   2407 svm_fini_asid(void)
   2408 {
   2409 	size_t allocsz;
   2410 
   2411 	allocsz = roundup(svm_maxasid, 8) / 8;
   2412 	kmem_free(svm_asidmap, allocsz);
   2413 
   2414 	mutex_destroy(&svm_asidlock);
   2415 }
   2416 
   2417 static void
   2418 svm_fini(void)
   2419 {
   2420 	uint64_t xc;
   2421 	size_t i;
   2422 
   2423 	xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
   2424 	xc_wait(xc);
   2425 
   2426 	for (i = 0; i < MAXCPUS; i++) {
   2427 		if (hsave[i].pa != 0)
   2428 			uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
   2429 	}
   2430 
   2431 	svm_fini_asid();
   2432 }
   2433 
   2434 static void
   2435 svm_capability(struct nvmm_capability *cap)
   2436 {
   2437 	cap->arch.mach_conf_support = 0;
   2438 	cap->arch.vcpu_conf_support =
   2439 	    NVMM_CAP_ARCH_VCPU_CONF_CPUID;
   2440 	cap->arch.xcr0_mask = svm_xcr0_mask;
   2441 	cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
   2442 	cap->arch.conf_cpuid_maxops = SVM_NCPUIDS;
   2443 }
   2444 
   2445 const struct nvmm_impl nvmm_x86_svm = {
   2446 	.name = "x86-svm",
   2447 	.ident = svm_ident,
   2448 	.init = svm_init,
   2449 	.fini = svm_fini,
   2450 	.capability = svm_capability,
   2451 	.mach_conf_max = NVMM_X86_MACH_NCONF,
   2452 	.mach_conf_sizes = NULL,
   2453 	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
   2454 	.vcpu_conf_sizes = svm_vcpu_conf_sizes,
   2455 	.state_size = sizeof(struct nvmm_x64_state),
   2456 	.machine_create = svm_machine_create,
   2457 	.machine_destroy = svm_machine_destroy,
   2458 	.machine_configure = svm_machine_configure,
   2459 	.vcpu_create = svm_vcpu_create,
   2460 	.vcpu_destroy = svm_vcpu_destroy,
   2461 	.vcpu_configure = svm_vcpu_configure,
   2462 	.vcpu_setstate = svm_vcpu_setstate,
   2463 	.vcpu_getstate = svm_vcpu_getstate,
   2464 	.vcpu_inject = svm_vcpu_inject,
   2465 	.vcpu_run = svm_vcpu_run
   2466 };
   2467