1 1.90 skrll /* $NetBSD: nvmm_x86_svm.c,v 1.90 2025/08/15 11:36:44 skrll Exp $ */ 2 1.1 maxv 3 1.1 maxv /* 4 1.76 maxv * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net 5 1.1 maxv * All rights reserved. 6 1.1 maxv * 7 1.76 maxv * This code is part of the NVMM hypervisor. 8 1.1 maxv * 9 1.1 maxv * Redistribution and use in source and binary forms, with or without 10 1.1 maxv * modification, are permitted provided that the following conditions 11 1.1 maxv * are met: 12 1.1 maxv * 1. Redistributions of source code must retain the above copyright 13 1.1 maxv * notice, this list of conditions and the following disclaimer. 14 1.1 maxv * 2. Redistributions in binary form must reproduce the above copyright 15 1.1 maxv * notice, this list of conditions and the following disclaimer in the 16 1.1 maxv * documentation and/or other materials provided with the distribution. 17 1.1 maxv * 18 1.76 maxv * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 1.76 maxv * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 1.76 maxv * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 1.76 maxv * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 1.76 maxv * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 23 1.76 maxv * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 1.76 maxv * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 25 1.76 maxv * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 1.76 maxv * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 1.76 maxv * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 1.76 maxv * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.90 2025/08/15 11:36:44 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <x86/apicvar.h>
#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>

#include <machine/cpuvar.h>
#include <machine/pmap_private.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

/* Assembly entry point: runs the guest described by the VMCB at the given
 * physical address, with the given GPR save area. Implemented elsewhere. */
int svm_vmrun(paddr_t, uint64_t *);

/*
 * CLGI/STGI clear and set the SVM Global Interrupt Flag (GIF); while GIF is
 * clear the host does not take interrupts. The "memory" clobber keeps the
 * compiler from moving memory accesses across the interrupt-state change.
 */
static inline void
svm_clgi(void)
{
	asm volatile ("clgi" ::: "memory");
}

static inline void
svm_stgi(void)
{
	asm volatile ("stgi" ::: "memory");
}

/* MSR holding the physical address of the host state-save area (VM_HSAVE_PA). */
#define MSR_VM_HSAVE_PA	0xC0010117

/* -------------------------------------------------------------------------- */

/*
 * SVM exit codes, as delivered in the VMCB EXITCODE field.
 * 0x0000-0x001F: CRn read/write intercepts, 0x0020-0x003F: DRn intercepts.
 */
#define VMCB_EXITCODE_CR0_READ		0x0000
#define VMCB_EXITCODE_CR1_READ		0x0001
#define VMCB_EXITCODE_CR2_READ		0x0002
#define VMCB_EXITCODE_CR3_READ		0x0003
#define VMCB_EXITCODE_CR4_READ		0x0004
#define VMCB_EXITCODE_CR5_READ		0x0005
#define VMCB_EXITCODE_CR6_READ		0x0006
#define VMCB_EXITCODE_CR7_READ		0x0007
#define VMCB_EXITCODE_CR8_READ		0x0008
#define VMCB_EXITCODE_CR9_READ		0x0009
#define VMCB_EXITCODE_CR10_READ		0x000A
#define VMCB_EXITCODE_CR11_READ		0x000B
#define VMCB_EXITCODE_CR12_READ		0x000C
#define VMCB_EXITCODE_CR13_READ		0x000D
#define VMCB_EXITCODE_CR14_READ		0x000E
#define VMCB_EXITCODE_CR15_READ		0x000F
#define VMCB_EXITCODE_CR0_WRITE		0x0010
#define VMCB_EXITCODE_CR1_WRITE		0x0011
#define VMCB_EXITCODE_CR2_WRITE		0x0012
#define VMCB_EXITCODE_CR3_WRITE		0x0013
#define VMCB_EXITCODE_CR4_WRITE		0x0014
#define VMCB_EXITCODE_CR5_WRITE		0x0015
#define VMCB_EXITCODE_CR6_WRITE		0x0016
#define VMCB_EXITCODE_CR7_WRITE		0x0017
#define VMCB_EXITCODE_CR8_WRITE		0x0018
#define VMCB_EXITCODE_CR9_WRITE		0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ		0x0020
#define VMCB_EXITCODE_DR1_READ		0x0021
#define VMCB_EXITCODE_DR2_READ		0x0022
#define VMCB_EXITCODE_DR3_READ		0x0023
#define VMCB_EXITCODE_DR4_READ		0x0024
#define VMCB_EXITCODE_DR5_READ		0x0025
#define VMCB_EXITCODE_DR6_READ		0x0026
#define VMCB_EXITCODE_DR7_READ		0x0027
#define VMCB_EXITCODE_DR8_READ		0x0028
#define VMCB_EXITCODE_DR9_READ		0x0029
#define VMCB_EXITCODE_DR10_READ		0x002A
#define VMCB_EXITCODE_DR11_READ		0x002B
#define VMCB_EXITCODE_DR12_READ		0x002C
#define VMCB_EXITCODE_DR13_READ		0x002D
#define VMCB_EXITCODE_DR14_READ		0x002E
#define VMCB_EXITCODE_DR15_READ		0x002F
#define VMCB_EXITCODE_DR0_WRITE		0x0030
#define VMCB_EXITCODE_DR1_WRITE		0x0031
#define VMCB_EXITCODE_DR2_WRITE		0x0032
#define VMCB_EXITCODE_DR3_WRITE		0x0033
#define VMCB_EXITCODE_DR4_WRITE		0x0034
#define VMCB_EXITCODE_DR5_WRITE		0x0035
#define VMCB_EXITCODE_DR6_WRITE		0x0036
#define VMCB_EXITCODE_DR7_WRITE		0x0037
#define VMCB_EXITCODE_DR8_WRITE		0x0038
#define VMCB_EXITCODE_DR9_WRITE		0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
/* 0x0040-0x005F: exception vector N intercepts. */
#define VMCB_EXITCODE_EXCP0		0x0040
#define VMCB_EXITCODE_EXCP1		0x0041
#define VMCB_EXITCODE_EXCP2		0x0042
#define VMCB_EXITCODE_EXCP3		0x0043
#define VMCB_EXITCODE_EXCP4		0x0044
#define VMCB_EXITCODE_EXCP5		0x0045
#define VMCB_EXITCODE_EXCP6		0x0046
#define VMCB_EXITCODE_EXCP7		0x0047
#define VMCB_EXITCODE_EXCP8		0x0048
#define VMCB_EXITCODE_EXCP9		0x0049
#define VMCB_EXITCODE_EXCP10		0x004A
#define VMCB_EXITCODE_EXCP11		0x004B
#define VMCB_EXITCODE_EXCP12		0x004C
#define VMCB_EXITCODE_EXCP13		0x004D
#define VMCB_EXITCODE_EXCP14		0x004E
#define VMCB_EXITCODE_EXCP15		0x004F
#define VMCB_EXITCODE_EXCP16		0x0050
#define VMCB_EXITCODE_EXCP17		0x0051
#define VMCB_EXITCODE_EXCP18		0x0052
#define VMCB_EXITCODE_EXCP19		0x0053
#define VMCB_EXITCODE_EXCP20		0x0054
#define VMCB_EXITCODE_EXCP21		0x0055
#define VMCB_EXITCODE_EXCP22		0x0056
#define VMCB_EXITCODE_EXCP23		0x0057
#define VMCB_EXITCODE_EXCP24		0x0058
#define VMCB_EXITCODE_EXCP25		0x0059
#define VMCB_EXITCODE_EXCP26		0x005A
#define VMCB_EXITCODE_EXCP27		0x005B
#define VMCB_EXITCODE_EXCP28		0x005C
#define VMCB_EXITCODE_EXCP29		0x005D
#define VMCB_EXITCODE_EXCP30		0x005E
#define VMCB_EXITCODE_EXCP31		0x005F
/* 0x0060 and up: instruction/event intercepts. */
#define VMCB_EXITCODE_INTR		0x0060
#define VMCB_EXITCODE_NMI		0x0061
#define VMCB_EXITCODE_SMI		0x0062
#define VMCB_EXITCODE_INIT		0x0063
#define VMCB_EXITCODE_VINTR		0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ		0x0066
#define VMCB_EXITCODE_GDTR_READ		0x0067
#define VMCB_EXITCODE_LDTR_READ		0x0068
#define VMCB_EXITCODE_TR_READ		0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE		0x006D
#define VMCB_EXITCODE_RDTSC		0x006E
#define VMCB_EXITCODE_RDPMC		0x006F
#define VMCB_EXITCODE_PUSHF		0x0070
#define VMCB_EXITCODE_POPF		0x0071
#define VMCB_EXITCODE_CPUID		0x0072
#define VMCB_EXITCODE_RSM		0x0073
#define VMCB_EXITCODE_IRET		0x0074
#define VMCB_EXITCODE_SWINT		0x0075
#define VMCB_EXITCODE_INVD		0x0076
#define VMCB_EXITCODE_PAUSE		0x0077
#define VMCB_EXITCODE_HLT		0x0078
#define VMCB_EXITCODE_INVLPG		0x0079
#define VMCB_EXITCODE_INVLPGA		0x007A
#define VMCB_EXITCODE_IOIO		0x007B
#define VMCB_EXITCODE_MSR		0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN		0x007F
#define VMCB_EXITCODE_VMRUN		0x0080
#define VMCB_EXITCODE_VMMCALL		0x0081
#define VMCB_EXITCODE_VMLOAD		0x0082
#define VMCB_EXITCODE_VMSAVE		0x0083
#define VMCB_EXITCODE_STGI		0x0084
#define VMCB_EXITCODE_CLGI		0x0085
#define VMCB_EXITCODE_SKINIT		0x0086
#define VMCB_EXITCODE_RDTSCP		0x0087
#define VMCB_EXITCODE_ICEBP		0x0088
#define VMCB_EXITCODE_WBINVD		0x0089
#define VMCB_EXITCODE_MONITOR		0x008A
#define VMCB_EXITCODE_MWAIT		0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV		0x008D
#define VMCB_EXITCODE_RDPRU		0x008E
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_INVLPGB		0x00A0
#define VMCB_EXITCODE_INVLPGB_ILLEGAL	0x00A1
#define VMCB_EXITCODE_INVPCID		0x00A2
#define VMCB_EXITCODE_MCOMMIT		0x00A3
#define VMCB_EXITCODE_TLBSYNC		0x00A4
#define VMCB_EXITCODE_NPF		0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT		0x0403
#define VMCB_EXITCODE_BUSY		-2ULL
#define VMCB_EXITCODE_INVALID		-1ULL

/* -------------------------------------------------------------------------- */

/*
 * VMCB control area: hardware-defined layout (must be exactly 1024 bytes,
 * see the CTASSERT below). Do not reorder or resize fields. The #defines
 * interleaved with the fields describe the bit layout of the field that
 * precedes them.
 */
struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(11)
#define VMCB_CTRL_INTERCEPT_MWAIT_ARMED	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDPRU	__BIT(14)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint32_t intercept_misc3;
#define VMCB_CTRL_INTERCEPT_INVLPGB_ALL	__BIT(0)
#define VMCB_CTRL_INTERCEPT_INVLPGB_ILL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_PCID	__BIT(2)
#define VMCB_CTRL_INTERCEPT_MCOMMIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_TLBSYNC	__BIT(4)

	uint8_t rsvd1[36];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07

	/* Virtual interrupt control (V_TPR, V_IRQ, ...). */
	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(3,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)
#define VMCB_CTRL_INTR_MASK		__BIT(1)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	/* Pending event that was being delivered when the #VMEXIT occurred. */
	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP		__BIT(0)
#define VMCB_CTRL_ENABLE_SEV		__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)
#define VMCB_CTRL_ENABLE_GMET		__BIT(3)
#define VMCB_CTRL_ENABLE_VTE		__BIT(5)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)

	uint64_t ghcb;

	/* Event to inject into the guest on the next VMRUN. */
	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	/* Nested-paging CR3. */
	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	/* VMCB clean bits: a set bit means the field group is unchanged. */
	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	/* Next sequential instruction pointer (decode assist). */
	uint64_t nrip;
	uint8_t inst_len;
	uint8_t inst_bytes[15];
	uint64_t avic_abpp;
	uint64_t rsvd3;
	uint64_t avic_ltp;

	uint64_t avic_phys;
#define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
#define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)

	uint64_t rsvd4;
	uint64_t vmsa_ptr;

	uint8_t pad[752];
} __packed;
CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

/* Segment register as saved in the VMCB; all but the selector are the
 * normally-hidden descriptor-cache parts. */
struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

/*
 * VMCB state-save area: hardware-defined layout (must be exactly 0xC00
 * bytes, see the CTASSERT below). Do not reorder or resize fields.
 */
struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t rsvd1[43];
	uint8_t cpl;
	uint8_t rsvd2[4];
	uint64_t efer;
	uint8_t rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t rsvd4[88];
	uint64_t rsp;
	uint8_t rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

/* A full VMCB is one page: control area followed by the state-save area. */
struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

/* -------------------------------------------------------------------------- */

static void svm_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void svm_vcpu_state_commit(struct nvmm_cpu *);

/* Per-CPU host state-save area, handed to the CPU via MSR_VM_HSAVE_PA. */
struct svm_hsave {
	paddr_t pa;
};

static struct svm_hsave hsave[MAXCPUS];

/* ASID allocation bitmap and bounds, protected by svm_asidlock. */
static uint8_t *svm_asidmap __read_mostly;
static uint32_t svm_maxasid __read_mostly;
static kmutex_t svm_asidlock __cacheline_aligned;

static bool svm_decode_assist __read_mostly;
static uint32_t svm_ctrl_tlb_flush __read_mostly;

#define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t svm_xcr0_mask __read_mostly;

/* Maximum number of per-VCPU CPUID leaf overrides. */
#define SVM_NCPUIDS	32

#define VMCB_NPAGES	1

#define MSRBM_NPAGES	2
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define IOBM_NPAGES	3
#define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)

/* Does not include EFER_LMSLE.
 */
#define EFER_VALID \
	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)

/* Register bits whose modification requires a guest TLB flush. */
#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

/* CR4 bits the guest is allowed to set. */
#define CR4_VALID \
	(CR4_VME |			\
	 CR4_PVI |			\
	 CR4_TSD |			\
	 CR4_DE |			\
	 CR4_PSE |			\
	 CR4_PAE |			\
	 CR4_MCE |			\
	 CR4_PGE |			\
	 CR4_PCE |			\
	 CR4_OSFXSR |			\
	 CR4_OSXMMEXCPT |		\
	 CR4_UMIP |			\
	 /* CR4_LA57 excluded */	\
	 /* bit 13 reserved on AMD */	\
	 /* bit 14 reserved on AMD */	\
	 /* bit 15 reserved on AMD */	\
	 CR4_FSGSBASE |			\
	 CR4_PCIDE |			\
	 CR4_OSXSAVE |			\
	 /* bit 19 reserved on AMD */	\
	 CR4_SMEP |			\
	 CR4_SMAP			\
	 /* CR4_PKE excluded */		\
	 /* CR4_CET excluded */		\
	 /* bits 24:63 reserved on AMD */)

/* -------------------------------------------------------------------------- */

/* Per-machine data: generation counter for host TLB shootdowns. */
struct svm_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t svm_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

/* Per-VCPU private data. */
struct svm_cpudata {
	/* General */
	bool shared_asid;
	bool gtlb_want_flush;	/* guest TLB flush wanted before next run */
	bool gtsc_want_update;	/* guest TSC offset needs refreshing */
	uint64_t vcpu_htlb_gen;	/* last host-TLB generation seen */

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state, saved across VMRUN. */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t fsbase;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;	/* VINTR intercept armed */
	bool nmi_window_exit;	/* IRET intercept armed */
	bool evt_pending;	/* an event was written to EVENTINJ */

	/* Guest state */
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[SVM_NCPUIDS];
};

/*
 * Mark all VMCB field groups as clean; code that modifies a field must
 * clear the matching clean bit (see svm_vmcb_cache_update/_flush).
 */
static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_TPR |
	    VMCB_CTRL_VMCB_CLEAN_NP |
	    VMCB_CTRL_VMCB_CLEAN_CR |
	    VMCB_CTRL_VMCB_CLEAN_DR |
	    VMCB_CTRL_VMCB_CLEAN_DT |
	    VMCB_CTRL_VMCB_CLEAN_SEG |
	    VMCB_CTRL_VMCB_CLEAN_CR2 |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

/*
 * Clear the clean bits matching the NVMM state flags that userland just
 * overwrote, so the CPU reloads those field groups from the VMCB.
 */
static void
svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_SEGS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
	}
	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
		      VMCB_CTRL_VMCB_CLEAN_TPR);
	}
	if (flags & NVMM_X64_STATE_DRS) {
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
	}
	if (flags & NVMM_X64_STATE_MSRS) {
		/* CR for EFER, NP for PAT. */
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
	}
}

/* Mark the given field groups dirty. */
static inline void
svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
{
	vmcb->ctrl.vmcb_clean &= ~flags;
}

/* Mark the whole VMCB dirty. */
static inline void
svm_vmcb_cache_flush_all(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}

/* EVENTINJ/EXITINTINFO event types. */
#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

/*
 * Arm a window exit: for NMIs, intercept IRET (end of the NMI-blocked
 * window); for interrupts, intercept VINTR and request a virtual interrupt
 * (V_IRQ with TPR checks ignored) so we exit as soon as the guest can take
 * an interrupt.
 */
static void
svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = true;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = true;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

/* Disarm the window exit set up by svm_event_waitexit_enable(). */
static void
svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = false;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = false;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

/*
 * True if injecting this exception vector should set RFLAGS.RF (resume
 * flag) in the guest; #DB, #OF, #DF and #MC are the exceptions for which
 * it must not be set. Used by svm_vcpu_inject().
 */
static inline bool
svm_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

/* 1 if the exception vector pushes an error code, 0 otherwise. */
static inline int
svm_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

/*
 * Read the event published in the comm page and program it into the VMCB
 * EVENTINJ field for delivery on the next VMRUN. Returns EINVAL for
 * vectors that cannot be injected as exceptions (NMI, #BP, #DE, >= 32) or
 * for an unknown event type.
 */
static int
svm_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	u_int evtype;
	uint8_t vector;
	uint64_t error;
	int type = 0, err = 0;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	/* Snapshot the comm page fields before acting on them. */
	__insn_barrier();

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		type = SVM_EVENT_TYPE_EXC;
		if (vector == 2 || vector >= 32)
			return EINVAL;
		if (vector == 3 || vector == 0)
			return EINVAL;
		if (svm_excp_has_rf(vector)) {
			vmcb->state.rflags |= PSL_RF;
		}
		err = svm_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = SVM_EVENT_TYPE_HW_INT;
		if (vector == 2) {
			/* Vector 2 is an NMI; also arm the IRET window. */
			type = SVM_EVENT_TYPE_NMI;
			svm_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN(vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN(error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	cpudata->evt_pending = true;

	return 0;
}

/* Inject #UD (vector 6, no error code) into the guest. */
static void
svm_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

/* Inject #GP (vector 13, error code 0) into the guest. */
static void
svm_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

/* If userland committed an event in the comm page, inject it now. */
static inline int
svm_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return svm_vcpu_inject(vcpu);
}

/* Skip the emulated instruction, using the next-RIP provided by the CPU. */
static inline void
svm_inkernel_advance(struct vmcb *vmcb)
{
	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmcb->state.rip = vmcb->ctrl.nrip;
	vmcb->state.rflags &= ~PSL_RF;
	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
}

/* Highest CPUID leaves we expose to the guest, per range. */
#define SVM_CPUID_MAX_BASIC		0xD
#define SVM_CPUID_MAX_HYPERVISOR	0x40000010
#define SVM_CPUID_MAX_EXTENDED		0x8000001F
static uint32_t svm_cpuid_max_basic __read_mostly;
static uint32_t svm_cpuid_max_extended __read_mostly;

/* Run the host CPUID and store the result in the guest's output registers. */
static void
svm_inkernel_exec_cpuid(struct svm_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->vmcb->state.rax = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}

/*
 * In-kernel emulation of guest CPUID: clamp out-of-range leaves, then
 * filter/override the results of the leaves we know about.
 * NOTE(review): the tail of this function continues beyond this excerpt.
 */
static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;


	/*
	 * `If a value entered for CPUID.EAX is higher than the maximum
	 * input value for basic or extended function for that
	 * processor then the data for the highest basic information
	 * leaf is returned.'
	 *
	 * --Intel 64 and IA-32 Architectures Software Developer's
	 * Manual, Vol. 2A, Order Number: 325383-077US, April 2022,
	 * Sec. 3.2 `Instructions (A-L)', CPUID--CPU Identification,
	 * p. 3-214.
	 *
	 * We take the same to hold for the hypervisor range,
	 * 0x40000000-0x4fffffff.
	 *
	 * (Sync with nvmm_x86_vmx.c.)
	 */
	if (eax < 0x40000000) {	/* basic CPUID range */
		if (__predict_false(eax > svm_cpuid_max_basic)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {	/* hypervisor CPUID range */
		if (__predict_false(eax > SVM_CPUID_MAX_HYPERVISOR)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {	/* extended CPUID range */
		if (__predict_false(eax > svm_cpuid_max_extended)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {

	/*
	 * basic CPUID range
	 */
	case 0x00000000:
		cpudata->vmcb->state.rax = svm_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;

		/* Expose the VCPU id as the local APIC id. */
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
maxv 918 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx; 919 1.33 maxv 920 1.25 maxv /* CPUID2_OSXSAVE depends on CR4. */ 921 1.25 maxv cr4 = cpudata->vmcb->state.cr4; 922 1.25 maxv if (!(cr4 & CR4_OSXSAVE)) { 923 1.25 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE; 924 1.25 maxv } 925 1.1 maxv break; 926 1.60 maxv case 0x00000002: /* Empty */ 927 1.60 maxv case 0x00000003: /* Empty */ 928 1.60 maxv case 0x00000004: /* Empty */ 929 1.60 maxv case 0x00000005: /* Monitor/MWait */ 930 1.60 maxv case 0x00000006: /* Power Management Related Features */ 931 1.33 maxv cpudata->vmcb->state.rax = 0; 932 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 933 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 934 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 935 1.33 maxv break; 936 1.60 maxv case 0x00000007: /* Structured Extended Features */ 937 1.69 maxv switch (ecx) { 938 1.69 maxv case 0: 939 1.69 maxv cpudata->vmcb->state.rax = 0; 940 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx; 941 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx; 942 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx; 943 1.69 maxv break; 944 1.69 maxv default: 945 1.69 maxv cpudata->vmcb->state.rax = 0; 946 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 947 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 948 1.69 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 949 1.69 maxv break; 950 1.69 maxv } 951 1.33 maxv break; 952 1.60 maxv case 0x00000008: /* Empty */ 953 1.60 maxv case 0x00000009: /* Empty */ 954 1.60 maxv case 0x0000000A: /* Empty */ 955 1.60 maxv case 0x0000000B: /* Empty */ 956 1.60 maxv case 0x0000000C: /* Empty */ 957 1.60 maxv cpudata->vmcb->state.rax = 0; 958 1.60 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 959 1.60 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 960 1.60 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 961 1.60 maxv break; 962 1.60 maxv case 0x0000000D: /* Processor Extended State Enumeration */ 963 
1.25 maxv if (svm_xcr0_mask == 0) { 964 1.1 maxv break; 965 1.1 maxv } 966 1.25 maxv switch (ecx) { 967 1.25 maxv case 0: 968 1.26 maxv cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF; 969 1.25 maxv if (cpudata->gxcr0 & XCR0_SSE) { 970 1.25 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave); 971 1.25 maxv } else { 972 1.25 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87); 973 1.25 maxv } 974 1.25 maxv cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */ 975 1.39 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64; 976 1.25 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32; 977 1.25 maxv break; 978 1.25 maxv case 1: 979 1.54 maxv cpudata->vmcb->state.rax &= 980 1.54 maxv (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC | 981 1.54 maxv CPUID_PES1_XGETBV); 982 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 983 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 984 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 985 1.54 maxv break; 986 1.54 maxv default: 987 1.54 maxv cpudata->vmcb->state.rax = 0; 988 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 989 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 990 1.54 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 991 1.25 maxv break; 992 1.1 maxv } 993 1.1 maxv break; 994 1.60 maxv 995 1.86 riastrad /* 996 1.86 riastrad * hypervisor CPUID range 997 1.86 riastrad */ 998 1.60 maxv case 0x40000000: /* Hypervisor Information */ 999 1.61 maxv cpudata->vmcb->state.rax = SVM_CPUID_MAX_HYPERVISOR; 1000 1.16 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1001 1.16 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1002 1.16 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1003 1.13 maxv memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4); 1004 1.13 maxv memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4); 1005 1.13 maxv memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4); 1006 1.10 maxv break; 1007 1.87 imil case 0x40000010: /* VMware-style TSC and LAPIC freq */ 1008 1.87 imil cpudata->gprs[NVMM_X64_GPR_RAX] = 
curcpu()->ci_data.cpu_cc_freq / 1000; 1009 1.87 imil if (has_lapic()) 1010 1.87 imil cpudata->gprs[NVMM_X64_GPR_RBX] = lapic_per_second / 1000; 1011 1.87 imil else 1012 1.87 imil cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1013 1.87 imil cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1014 1.87 imil cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1015 1.87 imil break; 1016 1.60 maxv 1017 1.86 riastrad /* 1018 1.86 riastrad * extended CPUID range 1019 1.86 riastrad */ 1020 1.70 maxv case 0x80000000: 1021 1.70 maxv cpudata->vmcb->state.rax = svm_cpuid_max_extended; 1022 1.70 maxv break; 1023 1.25 maxv case 0x80000001: 1024 1.33 maxv cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax; 1025 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx; 1026 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx; 1027 1.33 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx; 1028 1.10 maxv break; 1029 1.70 maxv case 0x80000002: /* Extended Processor Name String */ 1030 1.70 maxv case 0x80000003: /* Extended Processor Name String */ 1031 1.70 maxv case 0x80000004: /* Extended Processor Name String */ 1032 1.70 maxv case 0x80000005: /* L1 Cache and TLB Information */ 1033 1.70 maxv case 0x80000006: /* L2 Cache and TLB and L3 Cache Information */ 1034 1.70 maxv break; 1035 1.70 maxv case 0x80000007: /* Processor Power Management and RAS Capabilities */ 1036 1.70 maxv cpudata->vmcb->state.rax &= nvmm_cpuid_80000007.eax; 1037 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx; 1038 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx; 1039 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx; 1040 1.70 maxv break; 1041 1.70 maxv case 0x80000008: /* Processor Capacity Parameters and Ext Feat Ident */ 1042 1.70 maxv cpudata->vmcb->state.rax &= nvmm_cpuid_80000008.eax; 1043 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx; 1044 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx; 1045 
1.70 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx; 1046 1.70 maxv break; 1047 1.70 maxv case 0x80000009: /* Empty */ 1048 1.70 maxv case 0x8000000A: /* SVM Features */ 1049 1.70 maxv case 0x8000000B: /* Empty */ 1050 1.70 maxv case 0x8000000C: /* Empty */ 1051 1.70 maxv case 0x8000000D: /* Empty */ 1052 1.70 maxv case 0x8000000E: /* Empty */ 1053 1.70 maxv case 0x8000000F: /* Empty */ 1054 1.70 maxv case 0x80000010: /* Empty */ 1055 1.70 maxv case 0x80000011: /* Empty */ 1056 1.70 maxv case 0x80000012: /* Empty */ 1057 1.70 maxv case 0x80000013: /* Empty */ 1058 1.70 maxv case 0x80000014: /* Empty */ 1059 1.70 maxv case 0x80000015: /* Empty */ 1060 1.70 maxv case 0x80000016: /* Empty */ 1061 1.70 maxv case 0x80000017: /* Empty */ 1062 1.70 maxv case 0x80000018: /* Empty */ 1063 1.70 maxv cpudata->vmcb->state.rax = 0; 1064 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1065 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1066 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1067 1.70 maxv break; 1068 1.70 maxv case 0x80000019: /* TLB Characteristics for 1GB pages */ 1069 1.70 maxv case 0x8000001A: /* Instruction Optimizations */ 1070 1.70 maxv break; 1071 1.70 maxv case 0x8000001B: /* Instruction-Based Sampling Capabilities */ 1072 1.70 maxv case 0x8000001C: /* Lightweight Profiling Capabilities */ 1073 1.70 maxv cpudata->vmcb->state.rax = 0; 1074 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1075 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1076 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1077 1.70 maxv break; 1078 1.70 maxv case 0x8000001D: /* Cache Topology Information */ 1079 1.70 maxv case 0x8000001E: /* Processor Topology Information */ 1080 1.70 maxv break; /* TODO? 
*/ 1081 1.70 maxv case 0x8000001F: /* Encrypted Memory Capabilities */ 1082 1.70 maxv cpudata->vmcb->state.rax = 0; 1083 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1084 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1085 1.70 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1086 1.70 maxv break; 1087 1.70 maxv 1088 1.1 maxv default: 1089 1.1 maxv break; 1090 1.1 maxv } 1091 1.1 maxv } 1092 1.1 maxv 1093 1.1 maxv static void 1094 1.51 maxv svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason) 1095 1.51 maxv { 1096 1.51 maxv exit->u.insn.npc = vmcb->ctrl.nrip; 1097 1.51 maxv exit->reason = reason; 1098 1.51 maxv } 1099 1.51 maxv 1100 1.51 maxv static void 1101 1.1 maxv svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1102 1.51 maxv struct nvmm_vcpu_exit *exit) 1103 1.1 maxv { 1104 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1105 1.51 maxv struct nvmm_vcpu_conf_cpuid *cpuid; 1106 1.1 maxv uint64_t eax, ecx; 1107 1.1 maxv size_t i; 1108 1.1 maxv 1109 1.1 maxv eax = cpudata->vmcb->state.rax; 1110 1.13 maxv ecx = cpudata->gprs[NVMM_X64_GPR_RCX]; 1111 1.71 maxv svm_inkernel_exec_cpuid(cpudata, eax, ecx); 1112 1.38 maxv svm_inkernel_handle_cpuid(vcpu, eax, ecx); 1113 1.38 maxv 1114 1.1 maxv for (i = 0; i < SVM_NCPUIDS; i++) { 1115 1.51 maxv if (!cpudata->cpuidpresent[i]) { 1116 1.1 maxv continue; 1117 1.1 maxv } 1118 1.51 maxv cpuid = &cpudata->cpuid[i]; 1119 1.1 maxv if (cpuid->leaf != eax) { 1120 1.1 maxv continue; 1121 1.1 maxv } 1122 1.1 maxv 1123 1.51 maxv if (cpuid->exit) { 1124 1.51 maxv svm_exit_insn(cpudata->vmcb, exit, NVMM_VCPU_EXIT_CPUID); 1125 1.51 maxv return; 1126 1.51 maxv } 1127 1.51 maxv KASSERT(cpuid->mask); 1128 1.51 maxv 1129 1.1 maxv /* del */ 1130 1.51 maxv cpudata->vmcb->state.rax &= ~cpuid->u.mask.del.eax; 1131 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx; 1132 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx; 1133 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RDX] &= 
~cpuid->u.mask.del.edx; 1134 1.1 maxv 1135 1.1 maxv /* set */ 1136 1.51 maxv cpudata->vmcb->state.rax |= cpuid->u.mask.set.eax; 1137 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx; 1138 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx; 1139 1.51 maxv cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx; 1140 1.1 maxv 1141 1.1 maxv break; 1142 1.1 maxv } 1143 1.1 maxv 1144 1.17 maxv svm_inkernel_advance(cpudata->vmcb); 1145 1.51 maxv exit->reason = NVMM_VCPU_EXIT_NONE; 1146 1.1 maxv } 1147 1.1 maxv 1148 1.10 maxv static void 1149 1.10 maxv svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1150 1.51 maxv struct nvmm_vcpu_exit *exit) 1151 1.10 maxv { 1152 1.10 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1153 1.17 maxv struct vmcb *vmcb = cpudata->vmcb; 1154 1.10 maxv 1155 1.17 maxv if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) { 1156 1.17 maxv svm_event_waitexit_disable(vcpu, false); 1157 1.17 maxv } 1158 1.17 maxv 1159 1.17 maxv svm_inkernel_advance(cpudata->vmcb); 1160 1.51 maxv exit->reason = NVMM_VCPU_EXIT_HALTED; 1161 1.10 maxv } 1162 1.10 maxv 1163 1.1 maxv #define SVM_EXIT_IO_PORT __BITS(31,16) 1164 1.1 maxv #define SVM_EXIT_IO_SEG __BITS(12,10) 1165 1.1 maxv #define SVM_EXIT_IO_A64 __BIT(9) 1166 1.1 maxv #define SVM_EXIT_IO_A32 __BIT(8) 1167 1.1 maxv #define SVM_EXIT_IO_A16 __BIT(7) 1168 1.1 maxv #define SVM_EXIT_IO_SZ32 __BIT(6) 1169 1.1 maxv #define SVM_EXIT_IO_SZ16 __BIT(5) 1170 1.1 maxv #define SVM_EXIT_IO_SZ8 __BIT(4) 1171 1.1 maxv #define SVM_EXIT_IO_REP __BIT(3) 1172 1.1 maxv #define SVM_EXIT_IO_STR __BIT(2) 1173 1.4 maxv #define SVM_EXIT_IO_IN __BIT(0) 1174 1.1 maxv 1175 1.1 maxv static void 1176 1.1 maxv svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1177 1.51 maxv struct nvmm_vcpu_exit *exit) 1178 1.1 maxv { 1179 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1180 1.1 maxv uint64_t info = cpudata->vmcb->ctrl.exitinfo1; 1181 1.1 maxv uint64_t nextpc = 
cpudata->vmcb->ctrl.exitinfo2; 1182 1.1 maxv 1183 1.51 maxv exit->reason = NVMM_VCPU_EXIT_IO; 1184 1.1 maxv 1185 1.51 maxv exit->u.io.in = (info & SVM_EXIT_IO_IN) != 0; 1186 1.1 maxv exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT); 1187 1.1 maxv 1188 1.1 maxv if (svm_decode_assist) { 1189 1.1 maxv KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6); 1190 1.32 maxv exit->u.io.seg = __SHIFTOUT(info, SVM_EXIT_IO_SEG); 1191 1.1 maxv } else { 1192 1.8 maxv exit->u.io.seg = -1; 1193 1.1 maxv } 1194 1.1 maxv 1195 1.1 maxv if (info & SVM_EXIT_IO_A64) { 1196 1.1 maxv exit->u.io.address_size = 8; 1197 1.1 maxv } else if (info & SVM_EXIT_IO_A32) { 1198 1.1 maxv exit->u.io.address_size = 4; 1199 1.1 maxv } else if (info & SVM_EXIT_IO_A16) { 1200 1.1 maxv exit->u.io.address_size = 2; 1201 1.1 maxv } 1202 1.1 maxv 1203 1.1 maxv if (info & SVM_EXIT_IO_SZ32) { 1204 1.1 maxv exit->u.io.operand_size = 4; 1205 1.1 maxv } else if (info & SVM_EXIT_IO_SZ16) { 1206 1.1 maxv exit->u.io.operand_size = 2; 1207 1.1 maxv } else if (info & SVM_EXIT_IO_SZ8) { 1208 1.1 maxv exit->u.io.operand_size = 1; 1209 1.1 maxv } 1210 1.1 maxv 1211 1.1 maxv exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0; 1212 1.1 maxv exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0; 1213 1.1 maxv exit->u.io.npc = nextpc; 1214 1.43 maxv 1215 1.43 maxv svm_vcpu_state_provide(vcpu, 1216 1.43 maxv NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 1217 1.43 maxv NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 1218 1.1 maxv } 1219 1.1 maxv 1220 1.10 maxv static const uint64_t msr_ignore_list[] = { 1221 1.10 maxv 0xc0010055, /* MSR_CMPHALT */ 1222 1.10 maxv MSR_DE_CFG, 1223 1.10 maxv MSR_IC_CFG, 1224 1.10 maxv MSR_UCODE_AMD_PATCHLEVEL 1225 1.10 maxv }; 1226 1.10 maxv 1227 1.1 maxv static bool 1228 1.1 maxv svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1229 1.51 maxv struct nvmm_vcpu_exit *exit) 1230 1.1 maxv { 1231 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1232 1.19 maxv struct vmcb *vmcb = 
cpudata->vmcb; 1233 1.10 maxv uint64_t val; 1234 1.10 maxv size_t i; 1235 1.1 maxv 1236 1.51 maxv if (exit->reason == NVMM_VCPU_EXIT_RDMSR) { 1237 1.74 maxv if (exit->u.rdmsr.msr == MSR_EFER) { 1238 1.74 maxv val = vmcb->state.efer & ~EFER_SVME; 1239 1.74 maxv vmcb->state.rax = (val & 0xFFFFFFFF); 1240 1.74 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32); 1241 1.74 maxv goto handled; 1242 1.74 maxv } 1243 1.51 maxv if (exit->u.rdmsr.msr == MSR_NB_CFG) { 1244 1.10 maxv val = NB_CFG_INITAPICCPUIDLO; 1245 1.19 maxv vmcb->state.rax = (val & 0xFFFFFFFF); 1246 1.13 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32); 1247 1.10 maxv goto handled; 1248 1.10 maxv } 1249 1.10 maxv for (i = 0; i < __arraycount(msr_ignore_list); i++) { 1250 1.51 maxv if (msr_ignore_list[i] != exit->u.rdmsr.msr) 1251 1.10 maxv continue; 1252 1.10 maxv val = 0; 1253 1.19 maxv vmcb->state.rax = (val & 0xFFFFFFFF); 1254 1.13 maxv cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32); 1255 1.1 maxv goto handled; 1256 1.1 maxv } 1257 1.51 maxv } else { 1258 1.51 maxv if (exit->u.wrmsr.msr == MSR_EFER) { 1259 1.51 maxv if (__predict_false(exit->u.wrmsr.val & ~EFER_VALID)) { 1260 1.19 maxv goto error; 1261 1.1 maxv } 1262 1.51 maxv if ((vmcb->state.efer ^ exit->u.wrmsr.val) & 1263 1.1 maxv EFER_TLB_FLUSH) { 1264 1.28 maxv cpudata->gtlb_want_flush = true; 1265 1.1 maxv } 1266 1.51 maxv vmcb->state.efer = exit->u.wrmsr.val | EFER_SVME; 1267 1.24 maxv svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR); 1268 1.24 maxv goto handled; 1269 1.24 maxv } 1270 1.51 maxv if (exit->u.wrmsr.msr == MSR_TSC) { 1271 1.51 maxv cpudata->gtsc = exit->u.wrmsr.val; 1272 1.36 maxv cpudata->gtsc_want_update = true; 1273 1.1 maxv goto handled; 1274 1.1 maxv } 1275 1.10 maxv for (i = 0; i < __arraycount(msr_ignore_list); i++) { 1276 1.51 maxv if (msr_ignore_list[i] != exit->u.wrmsr.msr) 1277 1.10 maxv continue; 1278 1.10 maxv goto handled; 1279 1.10 maxv } 1280 1.1 maxv } 1281 1.1 maxv 1282 1.1 maxv return false; 1283 1.1 maxv 1284 
1.1 maxv handled: 1285 1.17 maxv svm_inkernel_advance(cpudata->vmcb); 1286 1.1 maxv return true; 1287 1.19 maxv 1288 1.19 maxv error: 1289 1.45 maxv svm_inject_gp(vcpu); 1290 1.19 maxv return true; 1291 1.1 maxv } 1292 1.1 maxv 1293 1.51 maxv static inline void 1294 1.51 maxv svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1295 1.51 maxv struct nvmm_vcpu_exit *exit) 1296 1.1 maxv { 1297 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1298 1.1 maxv 1299 1.51 maxv exit->reason = NVMM_VCPU_EXIT_RDMSR; 1300 1.51 maxv exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF); 1301 1.51 maxv exit->u.rdmsr.npc = cpudata->vmcb->ctrl.nrip; 1302 1.51 maxv 1303 1.51 maxv if (svm_inkernel_handle_msr(mach, vcpu, exit)) { 1304 1.51 maxv exit->reason = NVMM_VCPU_EXIT_NONE; 1305 1.51 maxv return; 1306 1.1 maxv } 1307 1.1 maxv 1308 1.51 maxv svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); 1309 1.51 maxv } 1310 1.51 maxv 1311 1.51 maxv static inline void 1312 1.51 maxv svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1313 1.51 maxv struct nvmm_vcpu_exit *exit) 1314 1.51 maxv { 1315 1.51 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1316 1.51 maxv uint64_t rdx, rax; 1317 1.1 maxv 1318 1.51 maxv rdx = cpudata->gprs[NVMM_X64_GPR_RDX]; 1319 1.51 maxv rax = cpudata->vmcb->state.rax; 1320 1.51 maxv 1321 1.51 maxv exit->reason = NVMM_VCPU_EXIT_WRMSR; 1322 1.51 maxv exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF); 1323 1.51 maxv exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF); 1324 1.51 maxv exit->u.wrmsr.npc = cpudata->vmcb->ctrl.nrip; 1325 1.1 maxv 1326 1.1 maxv if (svm_inkernel_handle_msr(mach, vcpu, exit)) { 1327 1.51 maxv exit->reason = NVMM_VCPU_EXIT_NONE; 1328 1.1 maxv return; 1329 1.1 maxv } 1330 1.1 maxv 1331 1.51 maxv svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS); 1332 1.51 maxv } 1333 1.51 maxv 1334 1.51 maxv static void 1335 1.51 maxv svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 
1336 1.51 maxv struct nvmm_vcpu_exit *exit) 1337 1.51 maxv { 1338 1.51 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1339 1.51 maxv uint64_t info = cpudata->vmcb->ctrl.exitinfo1; 1340 1.43 maxv 1341 1.51 maxv if (info == 0) { 1342 1.51 maxv svm_exit_rdmsr(mach, vcpu, exit); 1343 1.51 maxv } else { 1344 1.51 maxv svm_exit_wrmsr(mach, vcpu, exit); 1345 1.51 maxv } 1346 1.1 maxv } 1347 1.1 maxv 1348 1.1 maxv static void 1349 1.1 maxv svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1350 1.51 maxv struct nvmm_vcpu_exit *exit) 1351 1.1 maxv { 1352 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1353 1.1 maxv gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2; 1354 1.1 maxv 1355 1.51 maxv exit->reason = NVMM_VCPU_EXIT_MEMORY; 1356 1.27 maxv if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W) 1357 1.35 maxv exit->u.mem.prot = PROT_WRITE; 1358 1.77 maxv else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_I) 1359 1.35 maxv exit->u.mem.prot = PROT_EXEC; 1360 1.27 maxv else 1361 1.35 maxv exit->u.mem.prot = PROT_READ; 1362 1.27 maxv exit->u.mem.gpa = gpa; 1363 1.27 maxv exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len; 1364 1.27 maxv memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes, 1365 1.27 maxv sizeof(exit->u.mem.inst_bytes)); 1366 1.43 maxv 1367 1.43 maxv svm_vcpu_state_provide(vcpu, 1368 1.43 maxv NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 1369 1.43 maxv NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 1370 1.1 maxv } 1371 1.1 maxv 1372 1.1 maxv static void 1373 1.1 maxv svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu, 1374 1.51 maxv struct nvmm_vcpu_exit *exit) 1375 1.1 maxv { 1376 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1377 1.1 maxv struct vmcb *vmcb = cpudata->vmcb; 1378 1.1 maxv uint64_t val; 1379 1.1 maxv 1380 1.51 maxv exit->reason = NVMM_VCPU_EXIT_NONE; 1381 1.1 maxv 1382 1.13 maxv val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) | 1383 1.3 maxv (vmcb->state.rax & 0xFFFFFFFF); 1384 1.1 maxv 1385 1.13 maxv if 
(__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) { 1386 1.1 maxv goto error; 1387 1.1 maxv } else if (__predict_false(vmcb->state.cpl != 0)) { 1388 1.1 maxv goto error; 1389 1.1 maxv } else if (__predict_false((val & ~svm_xcr0_mask) != 0)) { 1390 1.1 maxv goto error; 1391 1.1 maxv } else if (__predict_false((val & XCR0_X87) == 0)) { 1392 1.1 maxv goto error; 1393 1.1 maxv } 1394 1.1 maxv 1395 1.13 maxv cpudata->gxcr0 = val; 1396 1.1 maxv 1397 1.17 maxv svm_inkernel_advance(cpudata->vmcb); 1398 1.1 maxv return; 1399 1.1 maxv 1400 1.1 maxv error: 1401 1.45 maxv svm_inject_gp(vcpu); 1402 1.1 maxv } 1403 1.1 maxv 1404 1.40 maxv static void 1405 1.51 maxv svm_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code) 1406 1.40 maxv { 1407 1.40 maxv exit->u.inv.hwcode = code; 1408 1.51 maxv exit->reason = NVMM_VCPU_EXIT_INVALID; 1409 1.40 maxv } 1410 1.40 maxv 1411 1.29 maxv /* -------------------------------------------------------------------------- */ 1412 1.29 maxv 1413 1.1 maxv static void 1414 1.1 maxv svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu) 1415 1.1 maxv { 1416 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1417 1.1 maxv 1418 1.65 maxv fpu_kern_enter(); 1419 1.82 mgorny /* TODO: should we use *XSAVE64 here? */ 1420 1.82 mgorny fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask, false); 1421 1.16 maxv 1422 1.16 maxv if (svm_xcr0_mask != 0) { 1423 1.13 maxv cpudata->hxcr0 = rdxcr(0); 1424 1.13 maxv wrxcr(0, cpudata->gxcr0); 1425 1.1 maxv } 1426 1.1 maxv } 1427 1.1 maxv 1428 1.1 maxv static void 1429 1.1 maxv svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu) 1430 1.1 maxv { 1431 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1432 1.1 maxv 1433 1.16 maxv if (svm_xcr0_mask != 0) { 1434 1.16 maxv cpudata->gxcr0 = rdxcr(0); 1435 1.16 maxv wrxcr(0, cpudata->hxcr0); 1436 1.16 maxv } 1437 1.16 maxv 1438 1.82 mgorny /* TODO: should we use *XSAVE64 here? 
*/ 1439 1.82 mgorny fpu_area_save(&cpudata->gfpu, svm_xcr0_mask, false); 1440 1.65 maxv fpu_kern_leave(); 1441 1.1 maxv } 1442 1.1 maxv 1443 1.1 maxv static void 1444 1.1 maxv svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu) 1445 1.1 maxv { 1446 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1447 1.1 maxv 1448 1.1 maxv x86_dbregs_save(curlwp); 1449 1.1 maxv 1450 1.15 maxv ldr7(0); 1451 1.15 maxv 1452 1.13 maxv ldr0(cpudata->drs[NVMM_X64_DR_DR0]); 1453 1.13 maxv ldr1(cpudata->drs[NVMM_X64_DR_DR1]); 1454 1.13 maxv ldr2(cpudata->drs[NVMM_X64_DR_DR2]); 1455 1.13 maxv ldr3(cpudata->drs[NVMM_X64_DR_DR3]); 1456 1.1 maxv } 1457 1.1 maxv 1458 1.1 maxv static void 1459 1.1 maxv svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu) 1460 1.1 maxv { 1461 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1462 1.1 maxv 1463 1.13 maxv cpudata->drs[NVMM_X64_DR_DR0] = rdr0(); 1464 1.13 maxv cpudata->drs[NVMM_X64_DR_DR1] = rdr1(); 1465 1.13 maxv cpudata->drs[NVMM_X64_DR_DR2] = rdr2(); 1466 1.13 maxv cpudata->drs[NVMM_X64_DR_DR3] = rdr3(); 1467 1.1 maxv 1468 1.1 maxv x86_dbregs_restore(curlwp); 1469 1.1 maxv } 1470 1.1 maxv 1471 1.1 maxv static void 1472 1.1 maxv svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu) 1473 1.1 maxv { 1474 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1475 1.1 maxv 1476 1.14 maxv cpudata->fsbase = rdmsr(MSR_FSBASE); 1477 1.14 maxv cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE); 1478 1.1 maxv } 1479 1.1 maxv 1480 1.1 maxv static void 1481 1.1 maxv svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu) 1482 1.1 maxv { 1483 1.1 maxv struct svm_cpudata *cpudata = vcpu->cpudata; 1484 1.1 maxv 1485 1.1 maxv wrmsr(MSR_STAR, cpudata->star); 1486 1.1 maxv wrmsr(MSR_LSTAR, cpudata->lstar); 1487 1.1 maxv wrmsr(MSR_CSTAR, cpudata->cstar); 1488 1.1 maxv wrmsr(MSR_SFMASK, cpudata->sfmask); 1489 1.14 maxv wrmsr(MSR_FSBASE, cpudata->fsbase); 1490 1.14 maxv wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase); 1491 1.1 maxv } 1492 1.1 maxv 1493 1.28 maxv /* 
 -------------------------------------------------------------------------- */

/*
 * Request a guest TLB flush if the VCPU migrated to another host CPU,
 * or if its ASID is shared (stale translations may exist).
 */
static inline void
svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
		cpudata->gtlb_want_flush = true;
	}
}

/*
 * Host-TLB counterpart of svm_gtlb_catchup(); intentionally empty.
 */
static inline void
svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	/*
	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
	 * executing on this hCPU and the hTLB already got flushed, or it
	 * was executing on another hCPU in which case the catchup is done
	 * in svm_gtlb_catchup().
	 */
}

/*
 * If the machine-wide hTLB generation moved past this VCPU's, request a
 * TLB flush in the VMCB. Returns the generation to acknowledge after
 * VMRUN via svm_htlb_flush_ack().
 */
static inline uint64_t
svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
{
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;

	machgen = machdata->mach_htlb_gen;
	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
		return machgen;
	}

	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
	return machgen;
}

/*
 * Record the hTLB generation as flushed -- but only if VMRUN actually
 * ran (exitcode != INVALID), otherwise the flush did not happen.
 */
static inline void
svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
{
	struct vmcb *vmcb = cpudata->vmcb;

	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
		cpudata->vcpu_htlb_gen = machgen;
	}
}

/*
 * If the exit interrupted an event delivery (EXITINTINFO valid), re-queue
 * that event for injection on the next entry.
 */
static inline void
svm_exit_evt(struct svm_cpudata *cpudata, struct vmcb *vmcb)
{
	cpudata->evt_pending = false;

	if (__predict_false(vmcb->ctrl.exitintinfo & VMCB_CTRL_EXITINTINFO_V)) {
		vmcb->ctrl.eventinj = vmcb->ctrl.exitintinfo;
		cpudata->evt_pending = true;
	}
}

/*
 * Run the VCPU: commit userland state and events, then loop on VMRUN,
 * handling in-kernel exits directly and breaking out to userland when an
 * exit needs emulation there. Returns 0, with the exit reason in 'exit';
 * EINVAL if the committed event was not injectable.
 *
 * The VMRUN sequence is order-critical: CLGI (block host interrupts),
 * hTLB flush check, VMRUN, flush ack, STGI -- with the guest FPU held
 * across the whole window.
 */
static int
svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;
	int hcpu;

	/* Pull in any state userland modified, and reset the cache. */
	svm_vcpu_state_commit(vcpu);
	comm->state_cached = 0;

	if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) {
		return EINVAL;
	}

	kpreempt_disable();
	hcpu = cpu_number();

	svm_gtlb_catchup(vcpu, hcpu);
	svm_htlb_catchup(vcpu, hcpu);

	/* Migrated: the cached VMCB fields and TSC offset are stale. */
	if (vcpu->hcpu_last != hcpu) {
		svm_vmcb_cache_flush_all(vmcb);
		cpudata->gtsc_want_update = true;
	}

	svm_vcpu_guest_dbregs_enter(vcpu);
	svm_vcpu_guest_misc_enter(vcpu);

	while (1) {
		if (cpudata->gtlb_want_flush) {
			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
		} else {
			vmcb->ctrl.tlb_ctrl = 0;
		}

		if (__predict_false(cpudata->gtsc_want_update)) {
			vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
		}

		svm_vcpu_guest_fpu_enter(vcpu);
		svm_clgi();
		machgen = svm_htlb_flush(machdata, cpudata);
		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
		svm_htlb_flush_ack(cpudata, machgen);
		svm_stgi();
		svm_vcpu_guest_fpu_leave(vcpu);

		svm_vmcb_cache_default(vmcb);

		/* VMRUN ran: the pending flush/update requests took
		 * effect. */
		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
			cpudata->gtlb_want_flush = false;
			cpudata->gtsc_want_update = false;
			vcpu->hcpu_last = hcpu;
		}
		svm_exit_evt(cpudata, vmcb);

		switch (vmcb->ctrl.exitcode) {
		case VMCB_EXITCODE_INTR:
		case VMCB_EXITCODE_NMI:
			exit->reason = NVMM_VCPU_EXIT_NONE;
			break;
		case VMCB_EXITCODE_VINTR:
			svm_event_waitexit_disable(vcpu, false);
			exit->reason = NVMM_VCPU_EXIT_INT_READY;
			break;
		case VMCB_EXITCODE_IRET:
			svm_event_waitexit_disable(vcpu, true);
			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
			break;
		case VMCB_EXITCODE_CPUID:
			svm_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_HLT:
			svm_exit_hlt(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_IOIO:
			svm_exit_io(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_MSR:
			svm_exit_msr(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
			break;
		/* Instructions NVMM does not expose to the guest: #UD. */
		case VMCB_EXITCODE_RDPMC:
		case VMCB_EXITCODE_RSM:
		case VMCB_EXITCODE_INVLPGA:
		case VMCB_EXITCODE_VMRUN:
		case VMCB_EXITCODE_VMMCALL:
		case VMCB_EXITCODE_VMLOAD:
		case VMCB_EXITCODE_VMSAVE:
		case VMCB_EXITCODE_STGI:
		case VMCB_EXITCODE_CLGI:
		case VMCB_EXITCODE_SKINIT:
		case VMCB_EXITCODE_RDTSCP:
		case VMCB_EXITCODE_RDPRU:
		case VMCB_EXITCODE_INVLPGB:
		case VMCB_EXITCODE_INVPCID:
		case VMCB_EXITCODE_MCOMMIT:
		case VMCB_EXITCODE_TLBSYNC:
			svm_inject_ud(vcpu);
			exit->reason = NVMM_VCPU_EXIT_NONE;
			break;
		case VMCB_EXITCODE_MONITOR:
			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MONITOR);
			break;
		case VMCB_EXITCODE_MWAIT:
		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MWAIT);
			break;
		case VMCB_EXITCODE_XSETBV:
			svm_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_NPF:
			svm_exit_npf(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
		default:
			svm_exit_invalid(exit, vmcb->ctrl.exitcode);
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (nvmm_return_needed(vcpu, exit)) {
			break;
		}
		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
			break;
		}
	}

	/* Record the guest TSC for the next gtsc_want_update. */
	cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;

	svm_vcpu_guest_misc_leave(vcpu);
	svm_vcpu_guest_dbregs_leave(vcpu);

	kpreempt_enable();

	/* Export the lightweight exit state to userland. */
	exit->exitstate.rflags = vmcb->state.rflags;
	exit->exitstate.cr8 = __SHIFTOUT(vmcb->ctrl.v, VMCB_CTRL_V_TPR);
	exit->exitstate.int_shadow =
	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
	exit->exitstate.int_window_exiting = cpudata->int_window_exit;
	exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
	exit->exitstate.evt_pending = cpudata->evt_pending;

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * Allocate 'npages' wired, contiguous pages and map them into kernel VA.
 * (Continues beyond this chunk.)
 */
static int
svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	/* VA allocation failed: give the physical pages back. */
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

/*
 * Release memory obtained from svm_memalloc(): unmap, free the KVA,
 * then free the physical pages.
 */
static void
svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */

#define SVM_MSRBM_READ	__BIT(0)
#define SVM_MSRBM_WRITE	__BIT(1)

/*
 * Clear the intercept bits for 'msr' in the MSR permission bitmap, for
 * reads and/or writes. Two bits per MSR, three architectural MSR ranges;
 * panics on an MSR outside the ranges covered by the bitmap.
 */
static void
svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		/* Range 1 */
		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		/* Range 2 */
		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
		/* Range 3 */
		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
	} else {
		panic("%s: wrong range", __func__);
	}

	bitoff = (msr & 0x3) << 1;

	if (read) {
		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
	}
	if (write) {
		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
	}
}

#define SVM_SEG_ATTRIB_TYPE		__BITS(3,0)
#define SVM_SEG_ATTRIB_S		__BIT(4)
#define SVM_SEG_ATTRIB_DPL		__BITS(6,5)
#define SVM_SEG_ATTRIB_P		__BIT(7)
#define SVM_SEG_ATTRIB_AVL		__BIT(8)
#define SVM_SEG_ATTRIB_L		__BIT(9)
#define SVM_SEG_ATTRIB_DEF		__BIT(10)
#define SVM_SEG_ATTRIB_G		__BIT(11)

/* Encode an NVMM segment descriptor into its VMCB representation. */
static void
svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
    struct vmcb_segment *vseg)
{
	vseg->selector = seg->selector;
	vseg->attrib =
	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
	    __SHIFTIN(seg->attrib.s, SVM_SEG_ATTRIB_S) |
	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
	    __SHIFTIN(seg->attrib.l, SVM_SEG_ATTRIB_L) |
	    __SHIFTIN(seg->attrib.def, SVM_SEG_ATTRIB_DEF) |
	    __SHIFTIN(seg->attrib.g, SVM_SEG_ATTRIB_G);
	vseg->limit = seg->limit;
	vseg->base = seg->base;
}

/* Decode a VMCB segment into its NVMM representation. */
static void
svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
{
	seg->selector = vseg->selector;
	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
	seg->attrib.s = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_S);
	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
	seg->attrib.l = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_L);
	seg->attrib.def = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF);
	seg->attrib.g = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_G);
	seg->limit = vseg->limit;
	seg->base = vseg->base;
}

/*
 * True if installing 'state' requires a guest TLB flush: a change to the
 * TLB-affecting bits of CR0/CR4, to CR3, or to the TLB-affecting bits
 * of EFER.
 */
static inline bool
svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
    uint64_t flags)
{
	if (flags & NVMM_X64_STATE_CRS) {
		if ((vmcb->state.cr0 ^
		     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
			return true;
		}
		if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
			return true;
		}
		if ((vmcb->state.cr4 ^
		     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
			return true;
		}
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		if ((vmcb->state.efer ^
		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
			return true;
		}
	}

	return false;
}

/*
 * Install into the VMCB/cpudata the state components selected by
 * comm->state_wanted, taken from the comm page.
 */
static void
svm_vcpu_setstate(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	const struct nvmm_x64_state *state = &comm->state;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	struct fxsave *fpustate;
	uint64_t flags;

	flags = comm->state_wanted;

	if (svm_state_tlb_flush(vmcb, state, flags)) {
		cpudata->gtlb_want_flush = true;
	}

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		/* The CPL is taken from the SS DPL. */
		vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
	}

	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));

		/* RIP/RSP/RAX/RFLAGS live in the VMCB, not in the GPR area. */
		vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
		vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
		vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
		vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
	}

	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
		vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
		vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
		vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
		vmcb->state.cr4 &= CR4_VALID;

		/* CR8 is stored in the V_TPR field of the VMCB. */
		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
		vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
		    VMCB_CTRL_V_TPR);

		if (svm_xcr0_mask != 0) {
			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
			cpudata->gxcr0 &= svm_xcr0_mask;
			cpudata->gxcr0 |= XCR0_X87;
		}
	}

	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(cpudata->drs, state->drs, sizeof(state->drs));

		vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
		vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		/*
		 * EFER_SVME is mandatory.
		 */
		vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
		vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
		vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
		vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
		vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
		vmcb->state.kernelgsbase =
		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
		vmcb->state.sysenter_cs =
		    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
		vmcb->state.sysenter_esp =
		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
		vmcb->state.sysenter_eip =
		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
		vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];

		/* The TSC offset is recomputed on the next VMRUN. */
		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
		cpudata->gtsc_want_update = true;
	}

	if (flags & NVMM_X64_STATE_INTR) {
		if (state->intr.int_shadow) {
			vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
		} else {
			vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
		}

		if (state->intr.int_window_exiting) {
			svm_event_waitexit_enable(vcpu, false);
		} else {
			svm_event_waitexit_disable(vcpu, false);
		}

		if (state->intr.nmi_window_exiting) {
			svm_event_waitexit_enable(vcpu, true);
		} else {
			svm_event_waitexit_disable(vcpu, true);
		}
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
		    sizeof(state->fpu));

		/* Sanitize MXCSR against the host's supported mask. */
		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;

		if (svm_xcr0_mask != 0) {
			/* Reset XSTATE_BV, to force a reload. */
			cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
		}
	}

	svm_vmcb_cache_update(vmcb, flags);

	comm->state_wanted = 0;
	comm->state_cached |= flags;
}

/*
 * Fetch from the VMCB/cpudata the state components selected by
 * comm->state_wanted, into the comm page.
 */
static void
svm_vcpu_getstate(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct nvmm_x64_state *state = &comm->state;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t flags;

	flags = comm->state_wanted;

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		/* The CPL is reflected in the SS DPL. */
		state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
	}

	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));

		state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
		state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
		state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
		state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
	}

	if (flags & NVMM_X64_STATE_CRS) {
		state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
		state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
		state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
		state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
		state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
		    VMCB_CTRL_V_TPR);
		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
	}

	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(state->drs, cpudata->drs, sizeof(state->drs));

		state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
		state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
		state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
		state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
		state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
		state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
		    vmcb->state.kernelgsbase;
		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
		    vmcb->state.sysenter_cs;
		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
		    vmcb->state.sysenter_esp;
		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
		    vmcb->state.sysenter_eip;
		state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;

		/* Hide SVME. */
		state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
	}

	if (flags & NVMM_X64_STATE_INTR) {
		state->intr.int_shadow =
		    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
		state->intr.int_window_exiting = cpudata->int_window_exit;
		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
		state->intr.evt_pending = cpudata->evt_pending;
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
		    sizeof(state->fpu));
	}

	comm->state_wanted = 0;
	comm->state_cached |= flags;
}

/* Fetch the requested state components into the comm page. */
static void
svm_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
{
	vcpu->comm->state_wanted = flags;
	svm_vcpu_getstate(vcpu);
}

/* Install the state components the comm page marked as committed. */
static void
svm_vcpu_state_commit(struct nvmm_cpu *vcpu)
{
	vcpu->comm->state_wanted = vcpu->comm->state_commit;
	vcpu->comm->state_commit = 0;
	svm_vcpu_setstate(vcpu);
}

/* -------------------------------------------------------------------------- */

/*
 * Allocate an ASID for the VCPU, from the global bitmap. If none is free,
 * fall back to the shared last ASID.
 */
static void
svm_asid_alloc(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t i, oct, bit;

	mutex_enter(&svm_asidlock);

	for (i = 0; i < svm_maxasid; i++) {
		oct = i / 8;
		bit = i % 8;

		if (svm_asidmap[oct] & __BIT(bit)) {
			continue;
		}

		svm_asidmap[oct] |= __BIT(bit);
		vmcb->ctrl.guest_asid = i;
		mutex_exit(&svm_asidlock);
		return;
	}

	/*
	 * No free ASID. Use the last one, which is shared and requires
	 * special TLB handling.
	 */
	cpudata->shared_asid = true;
	vmcb->ctrl.guest_asid = svm_maxasid - 1;
	mutex_exit(&svm_asidlock);
}

/* Return the VCPU's ASID to the bitmap, unless it is the shared one. */
static void
svm_asid_free(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t oct, bit;

	if (cpudata->shared_asid) {
		return;
	}

	oct = vmcb->ctrl.guest_asid / 8;
	bit = vmcb->ctrl.guest_asid % 8;

	mutex_enter(&svm_asidlock);
	svm_asidmap[oct] &= ~__BIT(bit);
	mutex_exit(&svm_asidlock);
}

/*
 * Initialize the VCPU: intercept configuration, I/O and MSR bitmaps,
 * ASID, Nested Paging, and install the architectural RESET state.
 */
static void
svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	/* Allow reads/writes of Control Registers. */
	vmcb->ctrl.intercept_cr = 0;

	/* Allow reads/writes of Debug Registers. */
	vmcb->ctrl.intercept_dr = 0;

	/* Allow exceptions 0 to 31. */
	vmcb->ctrl.intercept_vec = 0;

	/*
	 * Allow:
	 *  - SMI [smm interrupts]
	 *  - VINTR [virtual interrupts]
	 *  - CR0_SPEC [CR0 writes changing other fields than CR0.TS or CR0.MP]
	 *  - RIDTR [reads of IDTR]
	 *  - RGDTR [reads of GDTR]
	 *  - RLDTR [reads of LDTR]
	 *  - RTR [reads of TR]
	 *  - WIDTR [writes of IDTR]
	 *  - WGDTR [writes of GDTR]
	 *  - WLDTR [writes of LDTR]
	 *  - WTR [writes of TR]
	 *  - RDTSC [rdtsc instruction]
	 *  - PUSHF [pushf instruction]
	 *  - POPF [popf instruction]
	 *  - IRET [iret instruction]
	 *  - INTN [int $n instructions]
	 *  - PAUSE [pause instruction]
	 *  - INVLPG [invlpg instruction]
	 *  - TASKSW [task switches]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc1 =
	    VMCB_CTRL_INTERCEPT_INTR |
	    VMCB_CTRL_INTERCEPT_NMI |
	    VMCB_CTRL_INTERCEPT_INIT |
	    VMCB_CTRL_INTERCEPT_RDPMC |
	    VMCB_CTRL_INTERCEPT_CPUID |
	    VMCB_CTRL_INTERCEPT_RSM |
	    VMCB_CTRL_INTERCEPT_INVD |
	    VMCB_CTRL_INTERCEPT_HLT |
	    VMCB_CTRL_INTERCEPT_INVLPGA |
	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
	    VMCB_CTRL_INTERCEPT_MSR_PROT |
	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
	    VMCB_CTRL_INTERCEPT_SHUTDOWN;

	/*
	 * Allow:
	 *  - ICEBP [icebp instruction]
	 *  - WBINVD [wbinvd instruction]
	 *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc2 =
	    VMCB_CTRL_INTERCEPT_VMRUN |
	    VMCB_CTRL_INTERCEPT_VMMCALL |
	    VMCB_CTRL_INTERCEPT_VMLOAD |
	    VMCB_CTRL_INTERCEPT_VMSAVE |
	    VMCB_CTRL_INTERCEPT_STGI |
	    VMCB_CTRL_INTERCEPT_CLGI |
	    VMCB_CTRL_INTERCEPT_SKINIT |
	    VMCB_CTRL_INTERCEPT_RDTSCP |
	    VMCB_CTRL_INTERCEPT_MONITOR |
	    VMCB_CTRL_INTERCEPT_MWAIT |
	    VMCB_CTRL_INTERCEPT_XSETBV |
	    VMCB_CTRL_INTERCEPT_RDPRU;

	/*
	 * Intercept everything.
	 */
	vmcb->ctrl.intercept_misc3 =
	    VMCB_CTRL_INTERCEPT_INVLPGB_ALL |
	    VMCB_CTRL_INTERCEPT_PCID |
	    VMCB_CTRL_INTERCEPT_MCOMMIT |
	    VMCB_CTRL_INTERCEPT_TLBSYNC;

	/* Intercept all I/O accesses. */
	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;

	/* Allow direct access to certain MSRs. */
	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;

	/* Generate ASID. */
	svm_asid_alloc(vcpu);

	/* Virtual TPR. */
	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;

	/* Enable Nested Paging. */
	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];

	/* Init XSAVE header. */
	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
	cpudata->gfpu.xsh_xcomp_bv = 0;

	/* These MSRs are static. */
	cpudata->star = rdmsr(MSR_STAR);
	cpudata->lstar = rdmsr(MSR_LSTAR);
	cpudata->cstar = rdmsr(MSR_CSTAR);
	cpudata->sfmask = rdmsr(MSR_SFMASK);

	/* Install the RESET state. */
	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
	    sizeof(nvmm_x86_reset_state));
	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
	vcpu->comm->state_cached = 0;
	svm_vcpu_setstate(vcpu);
}

/*
 * Allocate the per-VCPU data: cpudata, VMCB, I/O bitmap and MSR bitmap,
 * then initialize the VCPU. Returns 0 or an errno.
 */
static int
svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata;
	int error;

	/* Allocate the SVM cpudata. */
	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	vcpu->cpudata = cpudata;

	/* VMCB */
	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
	    VMCB_NPAGES);
	if (error)
		goto error;

	/* I/O Bitmap */
	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
	    IOBM_NPAGES);
	if (error)
		goto error;

	/* MSR Bitmap */
	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
	    MSRBM_NPAGES);
	if (error)
		goto error;

	/* Init the VCPU info. */
	svm_vcpu_init(mach, vcpu);

	return 0;

error:
	/* Unwind whatever was allocated (cpudata is zero-initialized). */
	if (cpudata->vmcb_pa) {
		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
		    VMCB_NPAGES);
	}
	if (cpudata->iobm_pa) {
		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
		    IOBM_NPAGES);
	}
	if (cpudata->msrbm_pa) {
		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
		    MSRBM_NPAGES);
	}
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
	return error;
}

/* Release everything allocated by svm_vcpu_create(). */
static void
svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	svm_asid_free(vcpu);

	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);

	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
}

/* -------------------------------------------------------------------------- */

/*
 * Install, replace or delete a CPUID leaf configuration for the VCPU.
 * 'mask' and 'exit' are mutually exclusive; neither set means delete.
 * Returns 0, EINVAL on a bad configuration, or ENOBUFS if all the
 * SVM_NCPUIDS slots are taken.
 */
static int
svm_vcpu_configure_cpuid(struct svm_cpudata *cpudata, void *data)
{
	struct nvmm_vcpu_conf_cpuid *cpuid = data;
	size_t i;

	if (__predict_false(cpuid->mask && cpuid->exit)) {
		return EINVAL;
	}
	/* A bit cannot be both forced-set and forced-cleared. */
	if (__predict_false(cpuid->mask &&
	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
		return EINVAL;
	}

	/* If unset, delete, to restore the default behavior. */
	if (!cpuid->mask && !cpuid->exit) {
		for (i = 0; i < SVM_NCPUIDS; i++) {
			if (!cpudata->cpuidpresent[i]) {
				continue;
			}
			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
				cpudata->cpuidpresent[i] = false;
			}
		}
		return 0;
	}

	/* If already here, replace. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
			memcpy(&cpudata->cpuid[i], cpuid,
			    sizeof(struct nvmm_vcpu_conf_cpuid));
			return 0;
		}
	}

	/* Not here, insert. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			cpudata->cpuidpresent[i] = true;
			memcpy(&cpudata->cpuid[i], cpuid,
			    sizeof(struct nvmm_vcpu_conf_cpuid));
			return 0;
		}
	}

	return ENOBUFS;
}

/* Dispatch a per-VCPU configuration request. */
static int
svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	switch (op) {
	case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
		return svm_vcpu_configure_cpuid(cpudata, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

/*
 * pmap callback, invoked when the host mappings backing the guest
 * physical space change: bump the machine hTLB generation and shoot
 * down the other CPUs, which forces them out of the guest.
 */
static void
svm_tlb_flush(struct pmap *pm)
{
	struct nvmm_machine *mach = pm->pm_data;
	struct svm_machdata *machdata = mach->machdata;

	atomic_inc_64(&machdata->mach_htlb_gen);

	/* Generates IPIs, which cause #VMEXITs. */
	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
}

/* Create the machine: hook the pmap and allocate the machdata. */
static void
svm_machine_create(struct nvmm_machine *mach)
{
	struct svm_machdata *machdata;

	/* Fill in pmap info. */
	mach->vm->vm_map.pmap->pm_data = (void *)mach;
	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;

	machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
	mach->machdata = machdata;

	/* Start with an hTLB flush everywhere. */
	machdata->mach_htlb_gen = 1;
}

/* Destroy the machine: free the machdata. */
static void
svm_machine_destroy(struct nvmm_machine *mach)
{
	kmem_free(mach->machdata, sizeof(struct svm_machdata));
}

/* No machine-level configuration exists for SVM; never reached. */
static int
svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
{
	panic("%s: impossible", __func__);
}

/* -------------------------------------------------------------------------- */

/*
 * Detect whether the backend is usable on this CPU: AMD with SVM
 * revision 1, Nested Paging, nRIP, and not disabled/locked in the BIOS.
 */
static bool
svm_ident(void)
{
	u_int descs[4];
	uint64_t msr;

	if (cpu_vendor != CPUVENDOR_AMD) {
		return false;
	}
	if (!(cpu_feature[3] & CPUID_SVM)) {
		printf("NVMM: SVM not supported\n");
		return false;
	}

	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
		printf("NVMM: CPUID leaf not available\n");
		return false;
	}
	x86_cpuid(0x8000000a, descs);

	/* Expect revision 1. */
	if (__SHIFTOUT(descs[0], CPUID_AMD_SVM_REV) != 1) {
		printf("NVMM: SVM revision not supported\n");
		return false;
	}

	/* Want Nested Paging. */
	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
		printf("NVMM: SVM-NP not supported\n");
		return false;
	}

	/* Want nRIP.
*/ 2525 1.1 maxv if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) { 2526 1.59 maxv printf("NVMM: SVM-NRIPS not supported\n"); 2527 1.1 maxv return false; 2528 1.1 maxv } 2529 1.1 maxv 2530 1.1 maxv svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0; 2531 1.1 maxv 2532 1.1 maxv msr = rdmsr(MSR_VMCR); 2533 1.1 maxv if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) { 2534 1.59 maxv printf("NVMM: SVM disabled in BIOS\n"); 2535 1.1 maxv return false; 2536 1.1 maxv } 2537 1.1 maxv 2538 1.1 maxv return true; 2539 1.1 maxv } 2540 1.1 maxv 2541 1.1 maxv static void 2542 1.1 maxv svm_init_asid(uint32_t maxasid) 2543 1.1 maxv { 2544 1.1 maxv size_t i, j, allocsz; 2545 1.1 maxv 2546 1.1 maxv mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE); 2547 1.1 maxv 2548 1.1 maxv /* Arbitrarily limit. */ 2549 1.1 maxv maxasid = uimin(maxasid, 8192); 2550 1.1 maxv 2551 1.1 maxv svm_maxasid = maxasid; 2552 1.1 maxv allocsz = roundup(maxasid, 8) / 8; 2553 1.1 maxv svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP); 2554 1.1 maxv 2555 1.1 maxv /* ASID 0 is reserved for the host. */ 2556 1.1 maxv svm_asidmap[0] |= __BIT(0); 2557 1.1 maxv 2558 1.1 maxv /* ASID n-1 is special, we share it. 
*/ 2559 1.1 maxv i = (maxasid - 1) / 8; 2560 1.1 maxv j = (maxasid - 1) % 8; 2561 1.1 maxv svm_asidmap[i] |= __BIT(j); 2562 1.1 maxv } 2563 1.1 maxv 2564 1.1 maxv static void 2565 1.1 maxv svm_change_cpu(void *arg1, void *arg2) 2566 1.1 maxv { 2567 1.56 joerg bool enable = arg1 != NULL; 2568 1.1 maxv uint64_t msr; 2569 1.1 maxv 2570 1.1 maxv msr = rdmsr(MSR_VMCR); 2571 1.1 maxv if (msr & VMCR_SVMED) { 2572 1.1 maxv wrmsr(MSR_VMCR, msr & ~VMCR_SVMED); 2573 1.1 maxv } 2574 1.1 maxv 2575 1.1 maxv if (!enable) { 2576 1.1 maxv wrmsr(MSR_VM_HSAVE_PA, 0); 2577 1.1 maxv } 2578 1.1 maxv 2579 1.1 maxv msr = rdmsr(MSR_EFER); 2580 1.1 maxv if (enable) { 2581 1.1 maxv msr |= EFER_SVME; 2582 1.1 maxv } else { 2583 1.1 maxv msr &= ~EFER_SVME; 2584 1.1 maxv } 2585 1.1 maxv wrmsr(MSR_EFER, msr); 2586 1.1 maxv 2587 1.1 maxv if (enable) { 2588 1.1 maxv wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa); 2589 1.1 maxv } 2590 1.1 maxv } 2591 1.1 maxv 2592 1.1 maxv static void 2593 1.1 maxv svm_init(void) 2594 1.1 maxv { 2595 1.1 maxv CPU_INFO_ITERATOR cii; 2596 1.1 maxv struct cpu_info *ci; 2597 1.1 maxv struct vm_page *pg; 2598 1.1 maxv u_int descs[4]; 2599 1.1 maxv uint64_t xc; 2600 1.1 maxv 2601 1.1 maxv x86_cpuid(0x8000000a, descs); 2602 1.1 maxv 2603 1.1 maxv /* The guest TLB flush command. */ 2604 1.1 maxv if (descs[3] & CPUID_AMD_SVM_FlushByASID) { 2605 1.1 maxv svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST; 2606 1.1 maxv } else { 2607 1.1 maxv svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL; 2608 1.1 maxv } 2609 1.1 maxv 2610 1.1 maxv /* Init the ASID. */ 2611 1.1 maxv svm_init_asid(descs[1]); 2612 1.1 maxv 2613 1.1 maxv /* Init the XCR0 mask. */ 2614 1.1 maxv svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features; 2615 1.1 maxv 2616 1.69 maxv /* Init the max basic CPUID leaf. */ 2617 1.69 maxv svm_cpuid_max_basic = uimin(cpuid_level, SVM_CPUID_MAX_BASIC); 2618 1.69 maxv 2619 1.70 maxv /* Init the max extended CPUID leaf. 
*/ 2620 1.70 maxv x86_cpuid(0x80000000, descs); 2621 1.70 maxv svm_cpuid_max_extended = uimin(descs[0], SVM_CPUID_MAX_EXTENDED); 2622 1.70 maxv 2623 1.1 maxv memset(hsave, 0, sizeof(hsave)); 2624 1.1 maxv for (CPU_INFO_FOREACH(cii, ci)) { 2625 1.1 maxv pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO); 2626 1.1 maxv hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg); 2627 1.1 maxv } 2628 1.1 maxv 2629 1.1 maxv xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL); 2630 1.1 maxv xc_wait(xc); 2631 1.1 maxv } 2632 1.1 maxv 2633 1.1 maxv static void 2634 1.1 maxv svm_fini_asid(void) 2635 1.1 maxv { 2636 1.1 maxv size_t allocsz; 2637 1.1 maxv 2638 1.1 maxv allocsz = roundup(svm_maxasid, 8) / 8; 2639 1.1 maxv kmem_free(svm_asidmap, allocsz); 2640 1.1 maxv 2641 1.1 maxv mutex_destroy(&svm_asidlock); 2642 1.1 maxv } 2643 1.1 maxv 2644 1.1 maxv static void 2645 1.1 maxv svm_fini(void) 2646 1.1 maxv { 2647 1.1 maxv uint64_t xc; 2648 1.1 maxv size_t i; 2649 1.1 maxv 2650 1.1 maxv xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL); 2651 1.1 maxv xc_wait(xc); 2652 1.1 maxv 2653 1.1 maxv for (i = 0; i < MAXCPUS; i++) { 2654 1.1 maxv if (hsave[i].pa != 0) 2655 1.1 maxv uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa)); 2656 1.1 maxv } 2657 1.1 maxv 2658 1.1 maxv svm_fini_asid(); 2659 1.1 maxv } 2660 1.1 maxv 2661 1.1 maxv static void 2662 1.1 maxv svm_capability(struct nvmm_capability *cap) 2663 1.1 maxv { 2664 1.52 maxv cap->arch.mach_conf_support = 0; 2665 1.52 maxv cap->arch.vcpu_conf_support = 2666 1.52 maxv NVMM_CAP_ARCH_VCPU_CONF_CPUID; 2667 1.42 maxv cap->arch.xcr0_mask = svm_xcr0_mask; 2668 1.42 maxv cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask; 2669 1.42 maxv cap->arch.conf_cpuid_maxops = SVM_NCPUIDS; 2670 1.1 maxv } 2671 1.1 maxv 2672 1.1 maxv const struct nvmm_impl nvmm_x86_svm = { 2673 1.63 maxv .name = "x86-svm", 2674 1.1 maxv .ident = svm_ident, 2675 1.1 maxv .init = svm_init, 2676 1.1 maxv .fini = svm_fini, 2677 1.1 maxv .capability = svm_capability, 2678 1.51 maxv 
.mach_conf_max = NVMM_X86_MACH_NCONF, 2679 1.51 maxv .mach_conf_sizes = NULL, 2680 1.51 maxv .vcpu_conf_max = NVMM_X86_VCPU_NCONF, 2681 1.51 maxv .vcpu_conf_sizes = svm_vcpu_conf_sizes, 2682 1.1 maxv .state_size = sizeof(struct nvmm_x64_state), 2683 1.1 maxv .machine_create = svm_machine_create, 2684 1.1 maxv .machine_destroy = svm_machine_destroy, 2685 1.1 maxv .machine_configure = svm_machine_configure, 2686 1.1 maxv .vcpu_create = svm_vcpu_create, 2687 1.1 maxv .vcpu_destroy = svm_vcpu_destroy, 2688 1.51 maxv .vcpu_configure = svm_vcpu_configure, 2689 1.1 maxv .vcpu_setstate = svm_vcpu_setstate, 2690 1.1 maxv .vcpu_getstate = svm_vcpu_getstate, 2691 1.1 maxv .vcpu_inject = svm_vcpu_inject, 2692 1.1 maxv .vcpu_run = svm_vcpu_run 2693 1.1 maxv }; 2694