/*	$NetBSD: nvmm_x86_svm.c,v 1.31 2019/02/23 12:27:00 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.31 2019/02/23 12:27:00 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int svm_vmrun(paddr_t, uint64_t *);

#define	MSR_VM_HSAVE_PA	0xC0010117
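/*
 * VM_HSAVE_PA holds the physical address of a 4KB host state-save area:
 * VMRUN stashes (part of) the host state there and #VMEXIT restores it,
 * so each host CPU needs its own page (see the hsave[] array below).
 */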

/* -------------------------------------------------------------------------- */

#define VMCB_EXITCODE_CR0_READ		0x0000
#define VMCB_EXITCODE_CR1_READ		0x0001
#define VMCB_EXITCODE_CR2_READ		0x0002
#define VMCB_EXITCODE_CR3_READ		0x0003
#define VMCB_EXITCODE_CR4_READ		0x0004
#define VMCB_EXITCODE_CR5_READ		0x0005
#define VMCB_EXITCODE_CR6_READ		0x0006
#define VMCB_EXITCODE_CR7_READ		0x0007
#define VMCB_EXITCODE_CR8_READ		0x0008
#define VMCB_EXITCODE_CR9_READ		0x0009
#define VMCB_EXITCODE_CR10_READ		0x000A
#define VMCB_EXITCODE_CR11_READ		0x000B
#define VMCB_EXITCODE_CR12_READ		0x000C
#define VMCB_EXITCODE_CR13_READ		0x000D
#define VMCB_EXITCODE_CR14_READ		0x000E
#define VMCB_EXITCODE_CR15_READ		0x000F
#define VMCB_EXITCODE_CR0_WRITE		0x0010
#define VMCB_EXITCODE_CR1_WRITE		0x0011
#define VMCB_EXITCODE_CR2_WRITE		0x0012
#define VMCB_EXITCODE_CR3_WRITE		0x0013
#define VMCB_EXITCODE_CR4_WRITE		0x0014
#define VMCB_EXITCODE_CR5_WRITE		0x0015
#define VMCB_EXITCODE_CR6_WRITE		0x0016
#define VMCB_EXITCODE_CR7_WRITE		0x0017
#define VMCB_EXITCODE_CR8_WRITE		0x0018
#define VMCB_EXITCODE_CR9_WRITE		0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ		0x0020
#define VMCB_EXITCODE_DR1_READ		0x0021
#define VMCB_EXITCODE_DR2_READ		0x0022
#define VMCB_EXITCODE_DR3_READ		0x0023
#define VMCB_EXITCODE_DR4_READ		0x0024
#define VMCB_EXITCODE_DR5_READ		0x0025
#define VMCB_EXITCODE_DR6_READ		0x0026
#define VMCB_EXITCODE_DR7_READ		0x0027
#define VMCB_EXITCODE_DR8_READ		0x0028
#define VMCB_EXITCODE_DR9_READ		0x0029
#define VMCB_EXITCODE_DR10_READ		0x002A
#define VMCB_EXITCODE_DR11_READ		0x002B
#define VMCB_EXITCODE_DR12_READ		0x002C
#define VMCB_EXITCODE_DR13_READ		0x002D
#define VMCB_EXITCODE_DR14_READ		0x002E
#define VMCB_EXITCODE_DR15_READ		0x002F
#define VMCB_EXITCODE_DR0_WRITE		0x0030
#define VMCB_EXITCODE_DR1_WRITE		0x0031
#define VMCB_EXITCODE_DR2_WRITE		0x0032
#define VMCB_EXITCODE_DR3_WRITE		0x0033
#define VMCB_EXITCODE_DR4_WRITE		0x0034
#define VMCB_EXITCODE_DR5_WRITE		0x0035
#define VMCB_EXITCODE_DR6_WRITE		0x0036
#define VMCB_EXITCODE_DR7_WRITE		0x0037
#define VMCB_EXITCODE_DR8_WRITE		0x0038
#define VMCB_EXITCODE_DR9_WRITE		0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
#define VMCB_EXITCODE_EXCP0		0x0040
#define VMCB_EXITCODE_EXCP1		0x0041
#define VMCB_EXITCODE_EXCP2		0x0042
#define VMCB_EXITCODE_EXCP3		0x0043
#define VMCB_EXITCODE_EXCP4		0x0044
#define VMCB_EXITCODE_EXCP5		0x0045
#define VMCB_EXITCODE_EXCP6		0x0046
#define VMCB_EXITCODE_EXCP7		0x0047
#define VMCB_EXITCODE_EXCP8		0x0048
#define VMCB_EXITCODE_EXCP9		0x0049
#define VMCB_EXITCODE_EXCP10		0x004A
#define VMCB_EXITCODE_EXCP11		0x004B
#define VMCB_EXITCODE_EXCP12		0x004C
#define VMCB_EXITCODE_EXCP13		0x004D
#define VMCB_EXITCODE_EXCP14		0x004E
#define VMCB_EXITCODE_EXCP15		0x004F
#define VMCB_EXITCODE_EXCP16		0x0050
#define VMCB_EXITCODE_EXCP17		0x0051
#define VMCB_EXITCODE_EXCP18		0x0052
#define VMCB_EXITCODE_EXCP19		0x0053
#define VMCB_EXITCODE_EXCP20		0x0054
#define VMCB_EXITCODE_EXCP21		0x0055
#define VMCB_EXITCODE_EXCP22		0x0056
#define VMCB_EXITCODE_EXCP23		0x0057
#define VMCB_EXITCODE_EXCP24		0x0058
#define VMCB_EXITCODE_EXCP25		0x0059
#define VMCB_EXITCODE_EXCP26		0x005A
#define VMCB_EXITCODE_EXCP27		0x005B
#define VMCB_EXITCODE_EXCP28		0x005C
#define VMCB_EXITCODE_EXCP29		0x005D
#define VMCB_EXITCODE_EXCP30		0x005E
#define VMCB_EXITCODE_EXCP31		0x005F
#define VMCB_EXITCODE_INTR		0x0060
#define VMCB_EXITCODE_NMI		0x0061
#define VMCB_EXITCODE_SMI		0x0062
#define VMCB_EXITCODE_INIT		0x0063
#define VMCB_EXITCODE_VINTR		0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ		0x0066
#define VMCB_EXITCODE_GDTR_READ		0x0067
#define VMCB_EXITCODE_LDTR_READ		0x0068
#define VMCB_EXITCODE_TR_READ		0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE		0x006D
#define VMCB_EXITCODE_RDTSC		0x006E
#define VMCB_EXITCODE_RDPMC		0x006F
#define VMCB_EXITCODE_PUSHF		0x0070
#define VMCB_EXITCODE_POPF		0x0071
#define VMCB_EXITCODE_CPUID		0x0072
#define VMCB_EXITCODE_RSM		0x0073
#define VMCB_EXITCODE_IRET		0x0074
#define VMCB_EXITCODE_SWINT		0x0075
#define VMCB_EXITCODE_INVD		0x0076
#define VMCB_EXITCODE_PAUSE		0x0077
#define VMCB_EXITCODE_HLT		0x0078
#define VMCB_EXITCODE_INVLPG		0x0079
#define VMCB_EXITCODE_INVLPGA		0x007A
#define VMCB_EXITCODE_IOIO		0x007B
#define VMCB_EXITCODE_MSR		0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN		0x007F
#define VMCB_EXITCODE_VMRUN		0x0080
#define VMCB_EXITCODE_VMMCALL		0x0081
#define VMCB_EXITCODE_VMLOAD		0x0082
#define VMCB_EXITCODE_VMSAVE		0x0083
#define VMCB_EXITCODE_STGI		0x0084
#define VMCB_EXITCODE_CLGI		0x0085
#define VMCB_EXITCODE_SKINIT		0x0086
#define VMCB_EXITCODE_RDTSCP		0x0087
#define VMCB_EXITCODE_ICEBP		0x0088
#define VMCB_EXITCODE_WBINVD		0x0089
#define VMCB_EXITCODE_MONITOR		0x008A
#define VMCB_EXITCODE_MWAIT		0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV		0x008D
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_NPF		0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT		0x0403
#define VMCB_EXITCODE_INVALID		-1

/* -------------------------------------------------------------------------- */

struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint8_t  rsvd1[40];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07

	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(7,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP		__BIT(0)
#define VMCB_CTRL_ENABLE_SEV		__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)

	uint64_t ghcb;

	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	uint64_t nrip;
	uint8_t	inst_len;
	uint8_t	inst_bytes[15];
	uint64_t avic_abpp;
	uint64_t rsvd3;
	uint64_t avic_ltp;

	uint64_t avic_phys;
#define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
#define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)

	uint64_t rsvd4;
	uint64_t vmcb_ptr;

	uint8_t	pad[752];
} __packed;

CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

struct vmcb_state {
	struct   vmcb_segment es;
	struct   vmcb_segment cs;
	struct   vmcb_segment ss;
	struct   vmcb_segment ds;
	struct   vmcb_segment fs;
	struct   vmcb_segment gs;
	struct   vmcb_segment gdt;
	struct   vmcb_segment ldt;
	struct   vmcb_segment idt;
	struct   vmcb_segment tr;
	uint8_t	 rsvd1[43];
	uint8_t	 cpl;
	uint8_t  rsvd2[4];
	uint64_t efer;
	uint8_t	 rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t	 rsvd4[88];
	uint64_t rsp;
	uint8_t	 rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t	 rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t	 pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

/* -------------------------------------------------------------------------- */

struct svm_hsave {
	paddr_t pa;
};

static struct svm_hsave hsave[MAXCPUS];

static uint8_t *svm_asidmap __read_mostly;
static uint32_t svm_maxasid __read_mostly;
static kmutex_t svm_asidlock __cacheline_aligned;

static bool svm_decode_assist __read_mostly;
static uint32_t svm_ctrl_tlb_flush __read_mostly;

#define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t svm_xcr0_mask __read_mostly;

#define SVM_NCPUIDS	32

#define VMCB_NPAGES	1

#define MSRBM_NPAGES	2
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define IOBM_NPAGES	3
#define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)

/* Does not include EFER_LMSLE. */
#define EFER_VALID \
	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)
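
/*
 * Toggling any of the EFER/CR0/CR4 bits listed above changes how the
 * guest's page tables are interpreted (paging mode, global pages, write
 * protection, caching), so cached translations go stale and the guest
 * TLB entries must be flushed; see svm_state_tlb_flush() below.
 */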

/* -------------------------------------------------------------------------- */

struct svm_machdata {
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_x86_conf_cpuid cpuid[SVM_NCPUIDS];
	volatile uint64_t mach_htlb_gen;
};

static const size_t svm_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_X86_CONF_CPUID] = sizeof(struct nvmm_x86_conf_cpuid)
};

struct svm_cpudata {
	/* General */
	bool shared_asid;
	bool gtlb_want_flush;
	uint64_t vcpu_htlb_gen;

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t fsbase;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(64);

	/* Event state */
	bool int_window_exit;
	bool nmi_window_exit;

	/* Guest state */
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t tsc_offset;
	struct xsave_header gfpu __aligned(64);
};

static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_TPR |
	    VMCB_CTRL_VMCB_CLEAN_NP |
	    VMCB_CTRL_VMCB_CLEAN_CR |
	    VMCB_CTRL_VMCB_CLEAN_DR |
	    VMCB_CTRL_VMCB_CLEAN_DT |
	    VMCB_CTRL_VMCB_CLEAN_SEG |
	    VMCB_CTRL_VMCB_CLEAN_CR2 |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

static void
svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_SEGS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
	}
	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
		      VMCB_CTRL_VMCB_CLEAN_TPR);
	}
	if (flags & NVMM_X64_STATE_DRS) {
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
	}
	if (flags & NVMM_X64_STATE_MSRS) {
		/* CR for EFER, NP for PAT. */
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
	}
}

static inline void
svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
{
	vmcb->ctrl.vmcb_clean &= ~flags;
}

static inline void
svm_vmcb_cache_flush_all(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}
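
/*
 * The VMCB clean bits are a contract with the CPU: a set bit tells
 * VMRUN that the corresponding VMCB fields have not been modified
 * since the last #VMEXIT, so it may use its cached copy. Whenever we
 * touch a field we must clear the matching bit, which is what the
 * helpers above do; svm_vmcb_cache_flush_all() forces a full reload.
 */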

#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

static void
svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = true;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = true;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static void
svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = false;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = false;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}
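
/*
 * "Window exiting": when an event cannot be injected immediately, we
 * arm an intercept that fires as soon as the guest becomes able to
 * receive it. For interrupts we request a virtual interrupt (V_IRQ)
 * and intercept VINTR; for NMIs we intercept IRET, which marks the
 * end of the NMI handler currently in progress.
 */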

static inline int
svm_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
svm_vcpu_inject(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_event *event)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	int type = 0, err = 0;

	if (event->vector >= 256) {
		return EINVAL;
	}

	switch (event->type) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = SVM_EVENT_TYPE_HW_INT;
		if (event->vector == 2) {
			type = SVM_EVENT_TYPE_NMI;
		}
		if (type == SVM_EVENT_TYPE_NMI) {
			if (cpudata->nmi_window_exit) {
				return EAGAIN;
			}
			svm_event_waitexit_enable(vcpu, true);
		} else {
			if (((vmcb->state.rflags & PSL_I) == 0) ||
			    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0)) {
				svm_event_waitexit_enable(vcpu, false);
				return EAGAIN;
			}
		}
		err = 0;
		break;
	case NVMM_EVENT_INTERRUPT_SW:
		return EINVAL;
	case NVMM_EVENT_EXCEPTION:
		type = SVM_EVENT_TYPE_EXC;
		if (event->vector == 2 || event->vector >= 32)
			return EINVAL;
		if (event->vector == 3 || event->vector == 0)
			return EINVAL;
		err = svm_event_has_error(event->vector);
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN(event->vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN(event->u.error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	return 0;
}
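
/*
 * Example encoding, as a sanity check: injecting #GP (vector 13, an
 * exception that pushes an error code) yields eventinj = 0x80000B0D
 * in the low 32 bits, i.e. V | EV | (SVM_EVENT_TYPE_EXC << 8) | 13,
 * with the error code in bits 63:32.
 */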

static void
svm_inject_ud(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 6;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
svm_inject_gp(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 13;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static inline void
svm_inkernel_advance(struct vmcb *vmcb)
{
	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmcb->state.rip = vmcb->ctrl.nrip;
	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
}

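/*
 * Overwrite the CPUID leaves the guest must not see verbatim: report
 * the VCPU number as the local APIC ID, make OSXSAVE reflect the
 * guest's CR4, clamp leaf 0x0D to the XCR0 bits we allow, expose the
 * "___ NVMM ___" hypervisor signature in leaf 0x40000000, and hide
 * SVM and RDTSCP, which the guest cannot use.
 */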
static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;

	switch (eax) {
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = cpudata->vmcb->state.cr4;
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x0000000D:
		if (svm_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave);
			cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->vmcb->state.rax &= ~CPUID_PES1_XSAVES;
			break;
		}
		break;
	case 0x40000000:
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID_SVM;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~CPUID_RDTSCP;
		break;
	default:
		break;
	}
}

static void
svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x86_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->vmcb->state.rax;
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->vmcb->state.rax = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	for (i = 0; i < SVM_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->vmcb->state.rax &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->vmcb->state.rax |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	/* Overwrite non-tunable leaves. */
	svm_inkernel_handle_cpuid(vcpu, eax, ecx);

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_EXIT_NONE;
}

static void
svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) {
		svm_event_waitexit_disable(vcpu, false);
	}

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_EXIT_HALTED;
}

#define SVM_EXIT_IO_PORT	__BITS(31,16)
#define SVM_EXIT_IO_SEG		__BITS(12,10)
#define SVM_EXIT_IO_A64		__BIT(9)
#define SVM_EXIT_IO_A32		__BIT(8)
#define SVM_EXIT_IO_A16		__BIT(7)
#define SVM_EXIT_IO_SZ32	__BIT(6)
#define SVM_EXIT_IO_SZ16	__BIT(5)
#define SVM_EXIT_IO_SZ8		__BIT(4)
#define SVM_EXIT_IO_REP		__BIT(3)
#define SVM_EXIT_IO_STR		__BIT(2)
#define SVM_EXIT_IO_IN		__BIT(0)

static const int seg_to_nvmm[] = {
	[0] = NVMM_X64_SEG_ES,
	[1] = NVMM_X64_SEG_CS,
	[2] = NVMM_X64_SEG_SS,
	[3] = NVMM_X64_SEG_DS,
	[4] = NVMM_X64_SEG_FS,
	[5] = NVMM_X64_SEG_GS
};

static void
svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_IO;

	if (info & SVM_EXIT_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);

	if (svm_decode_assist) {
		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
		exit->u.io.seg = seg_to_nvmm[__SHIFTOUT(info, SVM_EXIT_IO_SEG)];
	} else {
		exit->u.io.seg = -1;
	}

	if (info & SVM_EXIT_IO_A64) {
		exit->u.io.address_size = 8;
	} else if (info & SVM_EXIT_IO_A32) {
		exit->u.io.address_size = 4;
	} else if (info & SVM_EXIT_IO_A16) {
		exit->u.io.address_size = 2;
	}

	if (info & SVM_EXIT_IO_SZ32) {
		exit->u.io.operand_size = 4;
	} else if (info & SVM_EXIT_IO_SZ16) {
		exit->u.io.operand_size = 2;
	} else if (info & SVM_EXIT_IO_SZ8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
	exit->u.io.npc = nextpc;
}
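
/*
 * exitinfo1 decoding sketch: a one-byte "in %al,$0x71", for instance,
 * would set SVM_EXIT_IO_IN and SVM_EXIT_IO_SZ8 plus one of the
 * address-size bits (depending on the guest's mode), with 0x71 in
 * SVM_EXIT_IO_PORT (bits 31:16); exitinfo2 holds the next instruction
 * pointer.
 */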

static const uint64_t msr_ignore_list[] = {
	0xc0010055, /* MSR_CMPHALT */
	MSR_DE_CFG,
	MSR_IC_CFG,
	MSR_UCODE_AMD_PATCHLEVEL
};
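
/*
 * The MSRs above are harmless to virtualize trivially: reads return
 * zero and writes are silently discarded in svm_inkernel_handle_msr()
 * below, which saves a round trip to userland when a guest pokes them.
 */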

static bool
svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;
	size_t i;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_NB_CFG) {
			val = NB_CFG_INITAPICCPUIDLO;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			val = 0;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_EFER) {
			if (__predict_false(exit->u.msr.val & ~EFER_VALID)) {
				goto error;
			}
			if ((vmcb->state.efer ^ exit->u.msr.val) &
			     EFER_TLB_FLUSH) {
				cpudata->gtlb_want_flush = true;
			}
			vmcb->state.efer = exit->u.msr.val | EFER_SVME;
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
			goto handled;
		}
		if (exit->u.msr.msr == MSR_TSC) {
			cpudata->tsc_offset = exit->u.msr.val - cpu_counter();
			vmcb->ctrl.tsc_offset = cpudata->tsc_offset +
			    curcpu()->ci_data.cpu_cc_skew;
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.msr.msr)
				continue;
			goto handled;
		}
		break;
	}

	return false;

handled:
	svm_inkernel_advance(cpudata->vmcb);
	return true;

error:
	svm_inject_gp(mach, vcpu);
	return true;
}

static void
svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;

	if (info == 0) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);

	if (info == 1) {
		uint64_t rdx, rax;
		rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->vmcb->state.rax;
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	} else {
		exit->u.msr.val = 0;
	}

	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	exit->u.msr.npc = cpudata->vmcb->ctrl.nrip;
}

static void
svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_MEMORY;
	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
		exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
		exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
	else
		exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
	exit->u.mem.gpa = gpa;
	exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
	memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
	    sizeof(exit->u.mem.inst_bytes));
}

static void
svm_exit_insn(struct vmcb *vmcb, struct nvmm_exit *exit, uint64_t reason)
{
	exit->u.insn.npc = vmcb->ctrl.nrip;
	exit->reason = reason;
}

static void
svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;

	exit->reason = NVMM_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (vmcb->state.rax & 0xFFFFFFFF);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false(vmcb->state.cpl != 0)) {
		goto error;
	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;

	svm_inkernel_advance(cpudata->vmcb);
	return;

error:
	svm_inject_gp(mach, vcpu);
}
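
/*
 * The checks above mirror the architectural XSETBV rules: CPL0 only,
 * ECX must select XCR0, no bits outside our mask, and X87 always set.
 * The accepted value is kept in gxcr0 and loaded with wrxcr() when we
 * enter guest FPU context, in svm_vcpu_guest_fpu_enter() below.
 */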

/* -------------------------------------------------------------------------- */

static void
svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->ts_set = (rcr0() & CR0_TS) != 0;

	fpu_area_save(&cpudata->hfpu, svm_xcr0_mask);
	fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);

	if (svm_xcr0_mask != 0) {
		cpudata->hxcr0 = rdxcr(0);
		wrxcr(0, cpudata->gxcr0);
	}
}

static void
svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (svm_xcr0_mask != 0) {
		cpudata->gxcr0 = rdxcr(0);
		wrxcr(0, cpudata->hxcr0);
	}

	fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
	fpu_area_restore(&cpudata->hfpu, svm_xcr0_mask);

	if (cpudata->ts_set) {
		stts();
	}
}

static void
svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	x86_dbregs_save(curlwp);

	ldr7(0);

	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
}

static void
svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();

	x86_dbregs_restore(curlwp);
}

static void
svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->fsbase = rdmsr(MSR_FSBASE);
	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	wrmsr(MSR_STAR, cpudata->star);
	wrmsr(MSR_LSTAR, cpudata->lstar);
	wrmsr(MSR_CSTAR, cpudata->cstar);
	wrmsr(MSR_SFMASK, cpudata->sfmask);
	wrmsr(MSR_FSBASE, cpudata->fsbase);
	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
}

/* -------------------------------------------------------------------------- */

static inline void
svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
		cpudata->gtlb_want_flush = true;
	}
}

static inline void
svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	/*
	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
	 * executing on this hCPU and the hTLB already got flushed, or it
	 * was executing on another hCPU in which case the catchup is done
	 * in svm_gtlb_catchup().
	 */
}

static inline uint64_t
svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
{
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;

	machgen = machdata->mach_htlb_gen;
	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
		return machgen;
	}

	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
	return machgen;
}

static inline void
svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
{
	struct vmcb *vmcb = cpudata->vmcb;

	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
		cpudata->vcpu_htlb_gen = machgen;
	}
}
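
/*
 * Host TLB ("hTLB") synchronization uses a generation number: when the
 * host changes guest-visible mappings it bumps mach_htlb_gen, and each
 * VCPU whose vcpu_htlb_gen lags behind requests a flush before VMRUN.
 * The generation is acknowledged only if VMRUN actually ran (exitcode
 * is not VMCB_EXITCODE_INVALID), so a failed entry retries the flush.
 */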

static int
svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;
	int hcpu, s;

	kpreempt_disable();
	hcpu = cpu_number();

	svm_gtlb_catchup(vcpu, hcpu);
	svm_htlb_catchup(vcpu, hcpu);

	if (vcpu->hcpu_last != hcpu) {
		vmcb->ctrl.tsc_offset = cpudata->tsc_offset +
		    curcpu()->ci_data.cpu_cc_skew;
		svm_vmcb_cache_flush_all(vmcb);
	}

	svm_vcpu_guest_dbregs_enter(vcpu);
	svm_vcpu_guest_misc_enter(vcpu);

	while (1) {
		if (cpudata->gtlb_want_flush) {
			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
		} else {
			vmcb->ctrl.tlb_ctrl = 0;
		}

		s = splhigh();
		machgen = svm_htlb_flush(machdata, cpudata);
		svm_vcpu_guest_fpu_enter(vcpu);
		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
		svm_vcpu_guest_fpu_leave(vcpu);
		svm_htlb_flush_ack(cpudata, machgen);
		splx(s);

		svm_vmcb_cache_default(vmcb);

		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
			cpudata->gtlb_want_flush = false;
			vcpu->hcpu_last = hcpu;
		}

		switch (vmcb->ctrl.exitcode) {
		case VMCB_EXITCODE_INTR:
		case VMCB_EXITCODE_NMI:
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_VINTR:
			svm_event_waitexit_disable(vcpu, false);
			exit->reason = NVMM_EXIT_INT_READY;
			break;
		case VMCB_EXITCODE_IRET:
			svm_event_waitexit_disable(vcpu, true);
			exit->reason = NVMM_EXIT_NMI_READY;
			break;
		case VMCB_EXITCODE_CPUID:
			svm_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_HLT:
			svm_exit_hlt(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_IOIO:
			svm_exit_io(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_MSR:
			svm_exit_msr(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_EXIT_SHUTDOWN;
			break;
		case VMCB_EXITCODE_RDPMC:
		case VMCB_EXITCODE_RSM:
		case VMCB_EXITCODE_INVLPGA:
		case VMCB_EXITCODE_VMRUN:
		case VMCB_EXITCODE_VMMCALL:
		case VMCB_EXITCODE_VMLOAD:
		case VMCB_EXITCODE_VMSAVE:
		case VMCB_EXITCODE_STGI:
		case VMCB_EXITCODE_CLGI:
		case VMCB_EXITCODE_SKINIT:
		case VMCB_EXITCODE_RDTSCP:
			svm_inject_ud(mach, vcpu);
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_MONITOR:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MONITOR);
			break;
		case VMCB_EXITCODE_MWAIT:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT);
			break;
		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
			svm_exit_insn(vmcb, exit, NVMM_EXIT_MWAIT_COND);
			break;
		case VMCB_EXITCODE_XSETBV:
			svm_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_NPF:
			svm_exit_npf(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
		default:
			exit->reason = NVMM_EXIT_INVALID;
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			break;
		}
		if (curcpu()->ci_data.cpu_softints != 0) {
			break;
		}
		if (curlwp->l_flag & LW_USERRET) {
			break;
		}
		if (exit->reason != NVMM_EXIT_NONE) {
			break;
		}
	}

	svm_vcpu_guest_misc_leave(vcpu);
	svm_vcpu_guest_dbregs_leave(vcpu);

	kpreempt_enable();

	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
	    VMCB_CTRL_V_TPR);
	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;

	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
	    cpudata->int_window_exit;
	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
	    cpudata->nmi_window_exit;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = TAILQ_FIRST(&pglist)->phys_addr;
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

static void
svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */

#define SVM_MSRBM_READ	__BIT(0)
#define SVM_MSRBM_WRITE	__BIT(1)

static void
svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		/* Range 1 */
		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		/* Range 2 */
		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
		/* Range 3 */
		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
	} else {
		panic("%s: wrong range", __func__);
	}

	bitoff = (msr & 0x3) << 1;

	if (read) {
		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
	}
	if (write) {
		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
	}
}
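
/*
 * The MSR permission map packs two intercept bits (read, write) per
 * MSR, 2KB per 8192-MSR range. Worked example under the formulas
 * above: MSR_STAR (0xC0000081) falls in range 2, so byte =
 * (0x81 >> 2) + 0x800 = 0x820 and bitoff = (0x81 & 3) << 1 = 2;
 * clearing bits 2 and 3 of bitmap[0x820] grants the guest direct
 * read and write access, e.g.:
 *
 *	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
 */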

#define SVM_SEG_ATTRIB_TYPE		__BITS(4,0)
#define SVM_SEG_ATTRIB_DPL		__BITS(6,5)
#define SVM_SEG_ATTRIB_P		__BIT(7)
#define SVM_SEG_ATTRIB_AVL		__BIT(8)
#define SVM_SEG_ATTRIB_LONG		__BIT(9)
#define SVM_SEG_ATTRIB_DEF32		__BIT(10)
#define SVM_SEG_ATTRIB_GRAN		__BIT(11)

static void
svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
    struct vmcb_segment *vseg)
{
	vseg->selector = seg->selector;
	vseg->attrib =
	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
	    __SHIFTIN(seg->attrib.lng, SVM_SEG_ATTRIB_LONG) |
	    __SHIFTIN(seg->attrib.def32, SVM_SEG_ATTRIB_DEF32) |
	    __SHIFTIN(seg->attrib.gran, SVM_SEG_ATTRIB_GRAN);
	vseg->limit = seg->limit;
	vseg->base = seg->base;
}

static void
svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
{
	seg->selector = vseg->selector;
	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
	seg->attrib.lng = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_LONG);
	seg->attrib.def32 = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF32);
	seg->attrib.gran = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_GRAN);
	seg->limit = vseg->limit;
	seg->base = vseg->base;
}

static inline bool
svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
    uint64_t flags)
{
	if (flags & NVMM_X64_STATE_CRS) {
		if ((vmcb->state.cr0 ^
		     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
			return true;
		}
		if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
			return true;
		}
		if ((vmcb->state.cr4 ^
		     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
			return true;
		}
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		if ((vmcb->state.efer ^
		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
			return true;
		}
	}

	return false;
}

static void
svm_vcpu_setstate(struct nvmm_cpu *vcpu, const void *data, uint64_t flags)
{
	const struct nvmm_x64_state *state = data;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	struct fxsave *fpustate;

	if (svm_state_tlb_flush(vmcb, state, flags)) {
		cpudata->gtlb_want_flush = true;
	}

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
	}

	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));

		vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
		vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
		vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
		vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
	}

	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
		vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
		vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
		vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];

		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
		vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
		    VMCB_CTRL_V_TPR);

		if (svm_xcr0_mask != 0) {
			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
			cpudata->gxcr0 &= svm_xcr0_mask;
			cpudata->gxcr0 |= XCR0_X87;
		}
	}

	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(cpudata->drs, state->drs, sizeof(state->drs));

		vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
		vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		/*
		 * EFER_SVME is mandatory.
		 */
		vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
		vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
		vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
		vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
		vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
		vmcb->state.kernelgsbase =
		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
		vmcb->state.sysenter_cs =
		    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
		vmcb->state.sysenter_esp =
		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
		vmcb->state.sysenter_eip =
		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
		vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];
	}

	if (flags & NVMM_X64_STATE_MISC) {
		if (state->misc[NVMM_X64_MISC_INT_SHADOW]) {
			vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
		} else {
			vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
		}

		if (state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT]) {
			svm_event_waitexit_enable(vcpu, false);
		} else {
			svm_event_waitexit_disable(vcpu, false);
		}

		if (state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT]) {
			svm_event_waitexit_enable(vcpu, true);
		} else {
			svm_event_waitexit_disable(vcpu, true);
		}
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
		    sizeof(state->fpu));

		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;

		if (svm_xcr0_mask != 0) {
			/* Reset XSTATE_BV, to force a reload. */
			cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
		}
	}

	svm_vmcb_cache_update(vmcb, flags);
}

static void
svm_vcpu_getstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
{
	struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
   1693 		    &vmcb->state.gdt);
   1694 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
   1695 		    &vmcb->state.idt);
   1696 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
   1697 		    &vmcb->state.ldt);
   1698 		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
   1699 		    &vmcb->state.tr);
   1700 
   1701 		state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
   1702 	}
   1703 
   1704 	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
   1705 	if (flags & NVMM_X64_STATE_GPRS) {
   1706 		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
   1707 
   1708 		state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
   1709 		state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
   1710 		state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
   1711 		state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
   1712 	}
   1713 
   1714 	if (flags & NVMM_X64_STATE_CRS) {
   1715 		state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
   1716 		state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
   1717 		state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
   1718 		state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
   1719 		state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
   1720 		    VMCB_CTRL_V_TPR);
   1721 		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
   1722 	}
   1723 
   1724 	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
   1725 	if (flags & NVMM_X64_STATE_DRS) {
   1726 		memcpy(state->drs, cpudata->drs, sizeof(state->drs));
   1727 
   1728 		state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
   1729 		state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
   1730 	}
   1731 
   1732 	if (flags & NVMM_X64_STATE_MSRS) {
   1733 		state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
   1734 		state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
   1735 		state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
   1736 		state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
   1737 		state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
   1738 		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
   1739 		    vmcb->state.kernelgsbase;
   1740 		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
   1741 		    vmcb->state.sysenter_cs;
   1742 		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
   1743 		    vmcb->state.sysenter_esp;
   1744 		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
   1745 		    vmcb->state.sysenter_eip;
   1746 		state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
   1747 
   1748 		/* Hide SVME. */
   1749 		state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
   1750 	}
   1751 
   1752 	if (flags & NVMM_X64_STATE_MISC) {
   1753 		state->misc[NVMM_X64_MISC_INT_SHADOW] =
   1754 		    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
   1755 		state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT] =
   1756 		    cpudata->int_window_exit;
   1757 		state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT] =
   1758 		    cpudata->nmi_window_exit;
   1759 	}
   1760 
   1761 	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
   1762 	if (flags & NVMM_X64_STATE_FPU) {
   1763 		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
   1764 		    sizeof(state->fpu));
   1765 	}
   1766 }
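
/*
 * Usage sketch (illustrative, not compiled): the two functions above
 * support partial get/modify/set cycles keyed on the flags mask.  E.g.
 * advancing the guest RIP without touching any other state, with a
 * hypothetical instruction length:
 */
#if 0
static void
svm_vcpu_advance_rip_sketch(struct nvmm_cpu *vcpu, uint64_t insn_len)
{
	struct nvmm_x64_state state;

	svm_vcpu_getstate(vcpu, &state, NVMM_X64_STATE_GPRS);
	state.gprs[NVMM_X64_GPR_RIP] += insn_len;
	svm_vcpu_setstate(vcpu, &state, NVMM_X64_STATE_GPRS);
}
#endif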
   1767 
   1768 /* -------------------------------------------------------------------------- */
   1769 
   1770 static void
   1771 svm_asid_alloc(struct nvmm_cpu *vcpu)
   1772 {
   1773 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1774 	struct vmcb *vmcb = cpudata->vmcb;
   1775 	size_t i, oct, bit;
   1776 
   1777 	mutex_enter(&svm_asidlock);
   1778 
   1779 	for (i = 0; i < svm_maxasid; i++) {
   1780 		oct = i / 8;
   1781 		bit = i % 8;
   1782 
   1783 		if (svm_asidmap[oct] & __BIT(bit)) {
   1784 			continue;
   1785 		}
   1786 
   1787 		svm_asidmap[oct] |= __BIT(bit);
   1788 		vmcb->ctrl.guest_asid = i;
   1789 		mutex_exit(&svm_asidlock);
   1790 		return;
   1791 	}
   1792 
   1793 	/*
   1794 	 * No free ASID. Use the last one, which is shared and requires
   1795 	 * special TLB handling.
   1796 	 */
   1797 	cpudata->shared_asid = true;
   1798 	vmcb->ctrl.guest_asid = svm_maxasid - 1;
   1799 	mutex_exit(&svm_asidlock);
   1800 }
   1801 
   1802 static void
   1803 svm_asid_free(struct nvmm_cpu *vcpu)
   1804 {
   1805 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1806 	struct vmcb *vmcb = cpudata->vmcb;
   1807 	size_t oct, bit;
   1808 
   1809 	if (cpudata->shared_asid) {
   1810 		return;
   1811 	}
   1812 
   1813 	oct = vmcb->ctrl.guest_asid / 8;
   1814 	bit = vmcb->ctrl.guest_asid % 8;
   1815 
   1816 	mutex_enter(&svm_asidlock);
   1817 	svm_asidmap[oct] &= ~__BIT(bit);
   1818 	mutex_exit(&svm_asidlock);
   1819 }
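
/*
 * The bitmap above packs eight ASIDs per byte: ASID i lives at bit
 * (i % 8) of byte (i / 8), so e.g. ASID 21 maps to byte 2, bit 5.  A
 * sketch of the lookup in isolation (illustrative, not compiled):
 */
#if 0
static bool
svm_asid_in_use_sketch(uint32_t asid)
{
	/* Must be called with svm_asidlock held. */
	return (svm_asidmap[asid / 8] & __BIT(asid % 8)) != 0;
}
#endif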
   1820 
   1821 static void
   1822 svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   1823 {
   1824 	struct svm_cpudata *cpudata = vcpu->cpudata;
   1825 	struct vmcb *vmcb = cpudata->vmcb;
   1826 
   1827 	/* Allow reads/writes of Control Registers. */
   1828 	vmcb->ctrl.intercept_cr = 0;
   1829 
   1830 	/* Allow reads/writes of Debug Registers. */
   1831 	vmcb->ctrl.intercept_dr = 0;
   1832 
   1833 	/* Allow exceptions 0 to 31. */
   1834 	vmcb->ctrl.intercept_vec = 0;
   1835 
   1836 	/*
   1837 	 * Allow:
    1838 	 *  - SMI [SMM interrupts]
    1839 	 *  - VINTR [virtual interrupts]
    1840 	 *  - CR0_SPEC [CR0 writes changing fields other than CR0.TS or CR0.MP]
   1841 	 *  - RIDTR [reads of IDTR]
   1842 	 *  - RGDTR [reads of GDTR]
   1843 	 *  - RLDTR [reads of LDTR]
   1844 	 *  - RTR [reads of TR]
   1845 	 *  - WIDTR [writes of IDTR]
   1846 	 *  - WGDTR [writes of GDTR]
   1847 	 *  - WLDTR [writes of LDTR]
   1848 	 *  - WTR [writes of TR]
   1849 	 *  - RDTSC [rdtsc instruction]
   1850 	 *  - PUSHF [pushf instruction]
   1851 	 *  - POPF [popf instruction]
   1852 	 *  - IRET [iret instruction]
   1853 	 *  - INTN [int $n instructions]
   1854 	 *  - INVD [invd instruction]
   1855 	 *  - PAUSE [pause instruction]
    1856 	 *  - INVLPG [invlpg instruction]
   1857 	 *  - TASKSW [task switches]
   1858 	 *
   1859 	 * Intercept the rest below.
   1860 	 */
   1861 	vmcb->ctrl.intercept_misc1 =
   1862 	    VMCB_CTRL_INTERCEPT_INTR |
   1863 	    VMCB_CTRL_INTERCEPT_NMI |
   1864 	    VMCB_CTRL_INTERCEPT_INIT |
   1865 	    VMCB_CTRL_INTERCEPT_RDPMC |
   1866 	    VMCB_CTRL_INTERCEPT_CPUID |
   1867 	    VMCB_CTRL_INTERCEPT_RSM |
   1868 	    VMCB_CTRL_INTERCEPT_HLT |
   1869 	    VMCB_CTRL_INTERCEPT_INVLPGA |
   1870 	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
   1871 	    VMCB_CTRL_INTERCEPT_MSR_PROT |
   1872 	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
   1873 	    VMCB_CTRL_INTERCEPT_SHUTDOWN;
   1874 
   1875 	/*
   1876 	 * Allow:
   1877 	 *  - ICEBP [icebp instruction]
   1878 	 *  - WBINVD [wbinvd instruction]
   1879 	 *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
   1880 	 *
   1881 	 * Intercept the rest below.
   1882 	 */
   1883 	vmcb->ctrl.intercept_misc2 =
   1884 	    VMCB_CTRL_INTERCEPT_VMRUN |
   1885 	    VMCB_CTRL_INTERCEPT_VMMCALL |
   1886 	    VMCB_CTRL_INTERCEPT_VMLOAD |
   1887 	    VMCB_CTRL_INTERCEPT_VMSAVE |
   1888 	    VMCB_CTRL_INTERCEPT_STGI |
   1889 	    VMCB_CTRL_INTERCEPT_CLGI |
   1890 	    VMCB_CTRL_INTERCEPT_SKINIT |
   1891 	    VMCB_CTRL_INTERCEPT_RDTSCP |
   1892 	    VMCB_CTRL_INTERCEPT_MONITOR |
   1893 	    VMCB_CTRL_INTERCEPT_MWAIT |
   1894 	    VMCB_CTRL_INTERCEPT_XSETBV;
   1895 
   1896 	/* Intercept all I/O accesses. */
   1897 	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
   1898 	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;
   1899 
   1900 	/* Allow direct access to certain MSRs. */
   1901 	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
   1902 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
   1903 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
   1904 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
   1905 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
   1906 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
   1907 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
   1908 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
   1909 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
   1910 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
   1911 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
   1912 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
   1913 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
   1914 	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
   1915 	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;
   1916 
   1917 	/* Generate ASID. */
   1918 	svm_asid_alloc(vcpu);
   1919 
    1920 	/* Virtual TPR, with guest interrupt masking virtualized. */
   1921 	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;
   1922 
   1923 	/* Enable Nested Paging. */
   1924 	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
   1925 	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];
   1926 
   1927 	/* Init XSAVE header. */
   1928 	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
   1929 	cpudata->gfpu.xsh_xcomp_bv = 0;
   1930 
    1931 	/* Start the guest TSC near zero, by biasing with the host counter. */
   1932 	cpudata->tsc_offset = -cpu_counter();
   1933 
    1934 	/* These host MSRs never change, save them once. */
   1935 	cpudata->star = rdmsr(MSR_STAR);
   1936 	cpudata->lstar = rdmsr(MSR_LSTAR);
   1937 	cpudata->cstar = rdmsr(MSR_CSTAR);
   1938 	cpudata->sfmask = rdmsr(MSR_SFMASK);
   1939 
   1940 	/* Install the RESET state. */
   1941 	svm_vcpu_setstate(vcpu, &nvmm_x86_reset_state, NVMM_X64_STATE_ALL);
   1942 }
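
/*
 * For reference, a sketch of the work a helper like svm_vcpu_msr_allow()
 * performs.  Per the AMD APM, the MSR permission map uses two bits per
 * MSR (even = read intercept, odd = write intercept) in three ranges:
 * MSRs 0x00000000-0x00001FFF at offset 0, 0xC0000000-0xC0001FFF at
 * offset 0x800, 0xC0010000-0xC0011FFF at offset 0x1000.  Clearing a bit
 * grants the guest direct access.  Illustrative only; the real helper is
 * defined earlier in this file and may differ.
 */
#if 0
static void
svm_msrbm_allow_sketch(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t base, off;

	if (msr < 0x00002000) {
		base = 0x0000, off = msr;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		base = 0x0800, off = msr - 0xC0000000;
	} else {
		base = 0x1000, off = msr - 0xC0010000;
	}

	/* Four MSRs per byte, two bits (read, write) per MSR. */
	if (read)
		bitmap[base + off / 4] &= ~__BIT((off % 4) * 2);
	if (write)
		bitmap[base + off / 4] &= ~__BIT((off % 4) * 2 + 1);
}
#endif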
   1943 
   1944 static int
   1945 svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   1946 {
   1947 	struct svm_cpudata *cpudata;
   1948 	int error;
   1949 
   1950 	/* Allocate the SVM cpudata. */
   1951 	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
   1952 	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
   1953 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
   1954 	vcpu->cpudata = cpudata;
   1955 
   1956 	/* VMCB */
   1957 	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
   1958 	    VMCB_NPAGES);
   1959 	if (error)
   1960 		goto error;
   1961 
   1962 	/* I/O Bitmap */
   1963 	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
   1964 	    IOBM_NPAGES);
   1965 	if (error)
   1966 		goto error;
   1967 
   1968 	/* MSR Bitmap */
   1969 	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
   1970 	    MSRBM_NPAGES);
   1971 	if (error)
   1972 		goto error;
   1973 
   1974 	/* Init the VCPU info. */
   1975 	svm_vcpu_init(mach, vcpu);
   1976 
   1977 	return 0;
   1978 
   1979 error:
   1980 	if (cpudata->vmcb_pa) {
   1981 		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
   1982 		    VMCB_NPAGES);
   1983 	}
   1984 	if (cpudata->iobm_pa) {
   1985 		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
   1986 		    IOBM_NPAGES);
   1987 	}
   1988 	if (cpudata->msrbm_pa) {
   1989 		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
   1990 		    MSRBM_NPAGES);
   1991 	}
   1992 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   1993 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   1994 	return error;
   1995 }
   1996 
   1997 static void
   1998 svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
   1999 {
   2000 	struct svm_cpudata *cpudata = vcpu->cpudata;
   2001 
   2002 	svm_asid_free(vcpu);
   2003 
   2004 	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
   2005 	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
   2006 	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
   2007 
   2008 	uvm_km_free(kernel_map, (vaddr_t)cpudata,
   2009 	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
   2010 }
   2011 
   2012 /* -------------------------------------------------------------------------- */
   2013 
   2014 static void
   2015 svm_tlb_flush(struct pmap *pm)
   2016 {
   2017 	struct nvmm_machine *mach = pm->pm_data;
   2018 	struct svm_machdata *machdata = mach->machdata;
   2019 
   2020 	atomic_inc_64(&machdata->mach_htlb_gen);
   2021 
   2022 	/* Generates IPIs, which cause #VMEXITs. */
   2023 	pmap_tlb_shootdown(pmap_kernel(), -1, PG_G, TLBSHOOT_UPDATE);
   2024 }
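
/*
 * A minimal sketch of how the run path might consume mach_htlb_gen
 * ("vcpu_htlb_gen" is a hypothetical per-VCPU field name; the real check
 * lives earlier in this file): each VCPU keeps the last generation it
 * honored, and a mismatch means the shootdown above happened since.
 */
#if 0
static void
svm_htlb_catchup_sketch(struct svm_machdata *machdata,
    struct svm_cpudata *cpudata)
{
	if (cpudata->vcpu_htlb_gen != machdata->mach_htlb_gen) {
		cpudata->gtlb_want_flush = true;
		cpudata->vcpu_htlb_gen = machdata->mach_htlb_gen;
	}
}
#endif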
   2025 
   2026 static void
   2027 svm_machine_create(struct nvmm_machine *mach)
   2028 {
   2029 	struct svm_machdata *machdata;
   2030 
   2031 	/* Fill in pmap info. */
   2032 	mach->vm->vm_map.pmap->pm_data = (void *)mach;
   2033 	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;
   2034 
   2035 	machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
   2036 	mach->machdata = machdata;
   2037 
   2038 	/* Start with an hTLB flush everywhere. */
   2039 	machdata->mach_htlb_gen = 1;
   2040 }
   2041 
   2042 static void
   2043 svm_machine_destroy(struct nvmm_machine *mach)
   2044 {
   2045 	kmem_free(mach->machdata, sizeof(struct svm_machdata));
   2046 }
   2047 
   2048 static int
   2049 svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
   2050 {
   2051 	struct nvmm_x86_conf_cpuid *cpuid = data;
   2052 	struct svm_machdata *machdata = (struct svm_machdata *)mach->machdata;
   2053 	size_t i;
   2054 
   2055 	if (__predict_false(op != NVMM_X86_CONF_CPUID)) {
   2056 		return EINVAL;
   2057 	}
   2058 
   2059 	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
   2060 	    (cpuid->set.ebx & cpuid->del.ebx) ||
   2061 	    (cpuid->set.ecx & cpuid->del.ecx) ||
   2062 	    (cpuid->set.edx & cpuid->del.edx))) {
   2063 		return EINVAL;
   2064 	}
   2065 
    2066 	/* If the leaf is already registered, replace it. */
   2067 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2068 		if (!machdata->cpuidpresent[i]) {
   2069 			continue;
   2070 		}
   2071 		if (machdata->cpuid[i].leaf == cpuid->leaf) {
   2072 			memcpy(&machdata->cpuid[i], cpuid,
   2073 			    sizeof(struct nvmm_x86_conf_cpuid));
   2074 			return 0;
   2075 		}
   2076 	}
   2077 
    2078 	/* Not registered yet, insert it. */
   2079 	for (i = 0; i < SVM_NCPUIDS; i++) {
   2080 		if (!machdata->cpuidpresent[i]) {
   2081 			machdata->cpuidpresent[i] = true;
   2082 			memcpy(&machdata->cpuid[i], cpuid,
   2083 			    sizeof(struct nvmm_x86_conf_cpuid));
   2084 			return 0;
   2085 		}
   2086 	}
   2087 
   2088 	return ENOBUFS;
   2089 }
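
/*
 * Usage sketch (illustrative): set and del are disjoint force-on and
 * force-off masks for the given CPUID leaf, as the EINVAL check above
 * suggests.  E.g. hiding a hypothetical feature bit, ECX bit 5 of
 * leaf 1:
 */
#if 0
static int
svm_hide_cpuid_bit_sketch(struct nvmm_machine *mach)
{
	struct nvmm_x86_conf_cpuid conf;

	memset(&conf, 0, sizeof(conf));
	conf.leaf = 0x00000001;
	conf.del.ecx = __BIT(5);	/* hypothetical bit to clear */
	return svm_machine_configure(mach, NVMM_X86_CONF_CPUID, &conf);
}
#endif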
   2090 
   2091 /* -------------------------------------------------------------------------- */
   2092 
   2093 static bool
   2094 svm_ident(void)
   2095 {
   2096 	u_int descs[4];
   2097 	uint64_t msr;
   2098 
   2099 	if (cpu_vendor != CPUVENDOR_AMD) {
   2100 		return false;
   2101 	}
   2102 	if (!(cpu_feature[3] & CPUID_SVM)) {
   2103 		return false;
   2104 	}
   2105 
   2106 	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
   2107 		return false;
   2108 	}
   2109 	x86_cpuid(0x8000000a, descs);
   2110 
   2111 	/* Want Nested Paging. */
   2112 	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
   2113 		return false;
   2114 	}
   2115 
   2116 	/* Want nRIP. */
   2117 	if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
   2118 		return false;
   2119 	}
   2120 
   2121 	svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;
   2122 
   2123 	msr = rdmsr(MSR_VMCR);
   2124 	if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
   2125 		return false;
   2126 	}
   2127 
   2128 	return true;
   2129 }
   2130 
   2131 static void
   2132 svm_init_asid(uint32_t maxasid)
   2133 {
   2134 	size_t i, j, allocsz;
   2135 
   2136 	mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);
   2137 
    2138 	/* Arbitrarily limit, to keep the bitmap small. */
   2139 	maxasid = uimin(maxasid, 8192);
   2140 
   2141 	svm_maxasid = maxasid;
   2142 	allocsz = roundup(maxasid, 8) / 8;
   2143 	svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
   2144 
   2145 	/* ASID 0 is reserved for the host. */
   2146 	svm_asidmap[0] |= __BIT(0);
   2147 
    2148 	/* ASID n-1 is special; we share it. */
   2149 	i = (maxasid - 1) / 8;
   2150 	j = (maxasid - 1) % 8;
   2151 	svm_asidmap[i] |= __BIT(j);
   2152 }
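
/*
 * Worked example of the sizing above: with maxasid capped at 8192, the
 * bitmap takes roundup(8192, 8) / 8 = 1024 bytes, one bit per ASID,
 * with bit 0 (host) and bit maxasid-1 (shared) pre-reserved.
 */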
   2153 
   2154 static void
   2155 svm_change_cpu(void *arg1, void *arg2)
   2156 {
   2157 	bool enable = (bool)arg1;
   2158 	uint64_t msr;
   2159 
   2160 	msr = rdmsr(MSR_VMCR);
   2161 	if (msr & VMCR_SVMED) {
   2162 		wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
   2163 	}
   2164 
   2165 	if (!enable) {
   2166 		wrmsr(MSR_VM_HSAVE_PA, 0);
   2167 	}
   2168 
   2169 	msr = rdmsr(MSR_EFER);
   2170 	if (enable) {
   2171 		msr |= EFER_SVME;
   2172 	} else {
   2173 		msr &= ~EFER_SVME;
   2174 	}
   2175 	wrmsr(MSR_EFER, msr);
   2176 
   2177 	if (enable) {
   2178 		wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
   2179 	}
   2180 }
   2181 
   2182 static void
   2183 svm_init(void)
   2184 {
   2185 	CPU_INFO_ITERATOR cii;
   2186 	struct cpu_info *ci;
   2187 	struct vm_page *pg;
   2188 	u_int descs[4];
   2189 	uint64_t xc;
   2190 
   2191 	x86_cpuid(0x8000000a, descs);
   2192 
   2193 	/* The guest TLB flush command. */
   2194 	if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
   2195 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
   2196 	} else {
   2197 		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
   2198 	}
   2199 
   2200 	/* Init the ASID. */
   2201 	svm_init_asid(descs[1]);
   2202 
   2203 	/* Init the XCR0 mask. */
   2204 	svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
   2205 
   2206 	memset(hsave, 0, sizeof(hsave));
   2207 	for (CPU_INFO_FOREACH(cii, ci)) {
   2208 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   2209 		hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
   2210 	}
   2211 
   2212 	xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
   2213 	xc_wait(xc);
   2214 }
   2215 
   2216 static void
   2217 svm_fini_asid(void)
   2218 {
   2219 	size_t allocsz;
   2220 
   2221 	allocsz = roundup(svm_maxasid, 8) / 8;
   2222 	kmem_free(svm_asidmap, allocsz);
   2223 
   2224 	mutex_destroy(&svm_asidlock);
   2225 }
   2226 
   2227 static void
   2228 svm_fini(void)
   2229 {
   2230 	uint64_t xc;
   2231 	size_t i;
   2232 
   2233 	xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
   2234 	xc_wait(xc);
   2235 
   2236 	for (i = 0; i < MAXCPUS; i++) {
   2237 		if (hsave[i].pa != 0)
   2238 			uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
   2239 	}
   2240 
   2241 	svm_fini_asid();
   2242 }
   2243 
   2244 static void
   2245 svm_capability(struct nvmm_capability *cap)
   2246 {
   2247 	cap->u.x86.xcr0_mask = svm_xcr0_mask;
   2248 	cap->u.x86.mxcsr_mask = x86_fpu_mxcsr_mask;
   2249 	cap->u.x86.conf_cpuid_maxops = SVM_NCPUIDS;
   2250 }
   2251 
   2252 const struct nvmm_impl nvmm_x86_svm = {
   2253 	.ident = svm_ident,
   2254 	.init = svm_init,
   2255 	.fini = svm_fini,
   2256 	.capability = svm_capability,
   2257 	.conf_max = NVMM_X86_NCONF,
   2258 	.conf_sizes = svm_conf_sizes,
   2259 	.state_size = sizeof(struct nvmm_x64_state),
   2260 	.machine_create = svm_machine_create,
   2261 	.machine_destroy = svm_machine_destroy,
   2262 	.machine_configure = svm_machine_configure,
   2263 	.vcpu_create = svm_vcpu_create,
   2264 	.vcpu_destroy = svm_vcpu_destroy,
   2265 	.vcpu_setstate = svm_vcpu_setstate,
   2266 	.vcpu_getstate = svm_vcpu_getstate,
   2267 	.vcpu_inject = svm_vcpu_inject,
   2268 	.vcpu_run = svm_vcpu_run
   2269 };
   2270