1 1.43 reinoud /* $NetBSD: libnvmm_x86.c,v 1.43 2020/12/27 20:56:14 reinoud Exp $ */ 2 1.1 maxv 3 1.1 maxv /* 4 1.40 maxv * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net 5 1.1 maxv * All rights reserved. 6 1.1 maxv * 7 1.40 maxv * This code is part of the NVMM hypervisor. 8 1.1 maxv * 9 1.1 maxv * Redistribution and use in source and binary forms, with or without 10 1.1 maxv * modification, are permitted provided that the following conditions 11 1.1 maxv * are met: 12 1.1 maxv * 1. Redistributions of source code must retain the above copyright 13 1.1 maxv * notice, this list of conditions and the following disclaimer. 14 1.1 maxv * 2. Redistributions in binary form must reproduce the above copyright 15 1.1 maxv * notice, this list of conditions and the following disclaimer in the 16 1.1 maxv * documentation and/or other materials provided with the distribution. 17 1.1 maxv * 18 1.40 maxv * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 1.40 maxv * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 1.40 maxv * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 1.40 maxv * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 1.40 maxv * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 23 1.40 maxv * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 1.40 maxv * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 25 1.40 maxv * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 1.40 maxv * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 1.40 maxv * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 1.40 maxv * SUCH DAMAGE. 
29 1.1 maxv */ 30 1.1 maxv 31 1.1 maxv #include <sys/cdefs.h> 32 1.1 maxv 33 1.1 maxv #include <stdio.h> 34 1.1 maxv #include <stdlib.h> 35 1.1 maxv #include <string.h> 36 1.1 maxv #include <unistd.h> 37 1.1 maxv #include <fcntl.h> 38 1.1 maxv #include <errno.h> 39 1.1 maxv #include <sys/ioctl.h> 40 1.1 maxv #include <sys/mman.h> 41 1.1 maxv #include <machine/vmparam.h> 42 1.1 maxv #include <machine/pte.h> 43 1.1 maxv #include <machine/psl.h> 44 1.1 maxv 45 1.10 maxv #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) 46 1.27 maxv #define __cacheline_aligned __attribute__((__aligned__(64))) 47 1.10 maxv 48 1.1 maxv #include <x86/specialreg.h> 49 1.1 maxv 50 1.29 maxv /* -------------------------------------------------------------------------- */ 51 1.29 maxv 52 1.6 maxv /* 53 1.6 maxv * Undocumented debugging function. Helpful. 54 1.6 maxv */ 55 1.6 maxv int 56 1.31 maxv nvmm_vcpu_dump(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) 57 1.6 maxv { 58 1.31 maxv struct nvmm_x64_state *state = vcpu->state; 59 1.26 maxv uint16_t *attr; 60 1.6 maxv size_t i; 61 1.6 maxv int ret; 62 1.6 maxv 63 1.6 maxv const char *segnames[] = { 64 1.26 maxv "ES", "CS", "SS", "DS", "FS", "GS", "GDT", "IDT", "LDT", "TR" 65 1.6 maxv }; 66 1.6 maxv 67 1.31 maxv ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_ALL); 68 1.6 maxv if (ret == -1) 69 1.6 maxv return -1; 70 1.6 maxv 71 1.31 maxv printf("+ VCPU id=%d\n", (int)vcpu->cpuid); 72 1.31 maxv printf("| -> RAX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RAX]); 73 1.34 maxv printf("| -> RCX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RCX]); 74 1.34 maxv printf("| -> RDX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RDX]); 75 1.31 maxv printf("| -> RBX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RBX]); 76 1.34 maxv printf("| -> RSP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RSP]); 77 1.34 maxv printf("| -> RBP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RBP]); 78 1.34 maxv printf("| -> RSI=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RSI]); 79 1.34 maxv printf("| -> 
RDI=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RDI]); 80 1.34 maxv printf("| -> RIP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RIP]); 81 1.31 maxv printf("| -> RFLAGS=%p\n", (void *)state->gprs[NVMM_X64_GPR_RFLAGS]); 82 1.6 maxv for (i = 0; i < NVMM_X64_NSEG; i++) { 83 1.31 maxv attr = (uint16_t *)&state->segs[i].attrib; 84 1.34 maxv printf("| -> %s: sel=0x%x base=%"PRIx64", limit=%x, " 85 1.34 maxv "attrib=%x [type=%d,l=%d,def=%d]\n", 86 1.6 maxv segnames[i], 87 1.31 maxv state->segs[i].selector, 88 1.31 maxv state->segs[i].base, 89 1.31 maxv state->segs[i].limit, 90 1.34 maxv *attr, 91 1.34 maxv state->segs[i].attrib.type, 92 1.34 maxv state->segs[i].attrib.l, 93 1.34 maxv state->segs[i].attrib.def); 94 1.26 maxv } 95 1.31 maxv printf("| -> MSR_EFER=%"PRIx64"\n", state->msrs[NVMM_X64_MSR_EFER]); 96 1.31 maxv printf("| -> CR0=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR0]); 97 1.31 maxv printf("| -> CR3=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR3]); 98 1.31 maxv printf("| -> CR4=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR4]); 99 1.31 maxv printf("| -> CR8=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR8]); 100 1.6 maxv 101 1.6 maxv return 0; 102 1.6 maxv } 103 1.6 maxv 104 1.1 maxv /* -------------------------------------------------------------------------- */ 105 1.1 maxv 106 1.1 maxv #define PTE32_L1_SHIFT 12 107 1.1 maxv #define PTE32_L2_SHIFT 22 108 1.1 maxv 109 1.1 maxv #define PTE32_L2_MASK 0xffc00000 110 1.1 maxv #define PTE32_L1_MASK 0x003ff000 111 1.1 maxv 112 1.1 maxv #define PTE32_L2_FRAME (PTE32_L2_MASK) 113 1.1 maxv #define PTE32_L1_FRAME (PTE32_L2_FRAME|PTE32_L1_MASK) 114 1.1 maxv 115 1.1 maxv #define pte32_l1idx(va) (((va) & PTE32_L1_MASK) >> PTE32_L1_SHIFT) 116 1.1 maxv #define pte32_l2idx(va) (((va) & PTE32_L2_MASK) >> PTE32_L2_SHIFT) 117 1.1 maxv 118 1.38 maxv #define CR3_FRAME_32BIT __BITS(31, 12) 119 1.19 maxv 120 1.1 maxv typedef uint32_t pte_32bit_t; 121 1.1 maxv 122 1.1 maxv static int 123 1.1 maxv x86_gva_to_gpa_32bit(struct nvmm_machine *mach, uint64_t cr3, 124 1.1 
maxv gvaddr_t gva, gpaddr_t *gpa, bool has_pse, nvmm_prot_t *prot) 125 1.1 maxv { 126 1.1 maxv gpaddr_t L2gpa, L1gpa; 127 1.1 maxv uintptr_t L2hva, L1hva; 128 1.1 maxv pte_32bit_t *pdir, pte; 129 1.28 maxv nvmm_prot_t pageprot; 130 1.1 maxv 131 1.1 maxv /* We begin with an RWXU access. */ 132 1.1 maxv *prot = NVMM_PROT_ALL; 133 1.1 maxv 134 1.1 maxv /* Parse L2. */ 135 1.19 maxv L2gpa = (cr3 & CR3_FRAME_32BIT); 136 1.28 maxv if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1) 137 1.1 maxv return -1; 138 1.1 maxv pdir = (pte_32bit_t *)L2hva; 139 1.1 maxv pte = pdir[pte32_l2idx(gva)]; 140 1.38 maxv if ((pte & PTE_P) == 0) 141 1.1 maxv return -1; 142 1.38 maxv if ((pte & PTE_U) == 0) 143 1.1 maxv *prot &= ~NVMM_PROT_USER; 144 1.38 maxv if ((pte & PTE_W) == 0) 145 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 146 1.38 maxv if ((pte & PTE_PS) && !has_pse) 147 1.1 maxv return -1; 148 1.38 maxv if (pte & PTE_PS) { 149 1.1 maxv *gpa = (pte & PTE32_L2_FRAME); 150 1.10 maxv *gpa = *gpa + (gva & PTE32_L1_MASK); 151 1.1 maxv return 0; 152 1.1 maxv } 153 1.1 maxv 154 1.1 maxv /* Parse L1. 
*/ 155 1.38 maxv L1gpa = (pte & PTE_FRAME); 156 1.28 maxv if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1) 157 1.1 maxv return -1; 158 1.1 maxv pdir = (pte_32bit_t *)L1hva; 159 1.1 maxv pte = pdir[pte32_l1idx(gva)]; 160 1.38 maxv if ((pte & PTE_P) == 0) 161 1.1 maxv return -1; 162 1.38 maxv if ((pte & PTE_U) == 0) 163 1.1 maxv *prot &= ~NVMM_PROT_USER; 164 1.38 maxv if ((pte & PTE_W) == 0) 165 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 166 1.38 maxv if (pte & PTE_PS) 167 1.1 maxv return -1; 168 1.1 maxv 169 1.38 maxv *gpa = (pte & PTE_FRAME); 170 1.1 maxv return 0; 171 1.1 maxv } 172 1.1 maxv 173 1.1 maxv /* -------------------------------------------------------------------------- */ 174 1.1 maxv 175 1.1 maxv #define PTE32_PAE_L1_SHIFT 12 176 1.1 maxv #define PTE32_PAE_L2_SHIFT 21 177 1.1 maxv #define PTE32_PAE_L3_SHIFT 30 178 1.1 maxv 179 1.1 maxv #define PTE32_PAE_L3_MASK 0xc0000000 180 1.1 maxv #define PTE32_PAE_L2_MASK 0x3fe00000 181 1.1 maxv #define PTE32_PAE_L1_MASK 0x001ff000 182 1.1 maxv 183 1.1 maxv #define PTE32_PAE_L3_FRAME (PTE32_PAE_L3_MASK) 184 1.1 maxv #define PTE32_PAE_L2_FRAME (PTE32_PAE_L3_FRAME|PTE32_PAE_L2_MASK) 185 1.1 maxv #define PTE32_PAE_L1_FRAME (PTE32_PAE_L2_FRAME|PTE32_PAE_L1_MASK) 186 1.1 maxv 187 1.1 maxv #define pte32_pae_l1idx(va) (((va) & PTE32_PAE_L1_MASK) >> PTE32_PAE_L1_SHIFT) 188 1.1 maxv #define pte32_pae_l2idx(va) (((va) & PTE32_PAE_L2_MASK) >> PTE32_PAE_L2_SHIFT) 189 1.1 maxv #define pte32_pae_l3idx(va) (((va) & PTE32_PAE_L3_MASK) >> PTE32_PAE_L3_SHIFT) 190 1.1 maxv 191 1.19 maxv #define CR3_FRAME_32BIT_PAE __BITS(31, 5) 192 1.19 maxv 193 1.1 maxv typedef uint64_t pte_32bit_pae_t; 194 1.1 maxv 195 1.1 maxv static int 196 1.1 maxv x86_gva_to_gpa_32bit_pae(struct nvmm_machine *mach, uint64_t cr3, 197 1.23 maxv gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot) 198 1.1 maxv { 199 1.1 maxv gpaddr_t L3gpa, L2gpa, L1gpa; 200 1.1 maxv uintptr_t L3hva, L2hva, L1hva; 201 1.1 maxv pte_32bit_pae_t *pdir, pte; 202 1.28 maxv nvmm_prot_t 
pageprot; 203 1.1 maxv 204 1.1 maxv /* We begin with an RWXU access. */ 205 1.1 maxv *prot = NVMM_PROT_ALL; 206 1.1 maxv 207 1.1 maxv /* Parse L3. */ 208 1.19 maxv L3gpa = (cr3 & CR3_FRAME_32BIT_PAE); 209 1.28 maxv if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1) 210 1.1 maxv return -1; 211 1.1 maxv pdir = (pte_32bit_pae_t *)L3hva; 212 1.1 maxv pte = pdir[pte32_pae_l3idx(gva)]; 213 1.38 maxv if ((pte & PTE_P) == 0) 214 1.1 maxv return -1; 215 1.38 maxv if (pte & PTE_NX) 216 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 217 1.38 maxv if (pte & PTE_PS) 218 1.1 maxv return -1; 219 1.1 maxv 220 1.1 maxv /* Parse L2. */ 221 1.38 maxv L2gpa = (pte & PTE_FRAME); 222 1.28 maxv if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1) 223 1.1 maxv return -1; 224 1.1 maxv pdir = (pte_32bit_pae_t *)L2hva; 225 1.1 maxv pte = pdir[pte32_pae_l2idx(gva)]; 226 1.38 maxv if ((pte & PTE_P) == 0) 227 1.1 maxv return -1; 228 1.38 maxv if ((pte & PTE_U) == 0) 229 1.1 maxv *prot &= ~NVMM_PROT_USER; 230 1.38 maxv if ((pte & PTE_W) == 0) 231 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 232 1.38 maxv if (pte & PTE_NX) 233 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 234 1.38 maxv if (pte & PTE_PS) { 235 1.1 maxv *gpa = (pte & PTE32_PAE_L2_FRAME); 236 1.10 maxv *gpa = *gpa + (gva & PTE32_PAE_L1_MASK); 237 1.1 maxv return 0; 238 1.1 maxv } 239 1.1 maxv 240 1.1 maxv /* Parse L1. 
*/ 241 1.38 maxv L1gpa = (pte & PTE_FRAME); 242 1.28 maxv if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1) 243 1.1 maxv return -1; 244 1.1 maxv pdir = (pte_32bit_pae_t *)L1hva; 245 1.1 maxv pte = pdir[pte32_pae_l1idx(gva)]; 246 1.38 maxv if ((pte & PTE_P) == 0) 247 1.1 maxv return -1; 248 1.38 maxv if ((pte & PTE_U) == 0) 249 1.1 maxv *prot &= ~NVMM_PROT_USER; 250 1.38 maxv if ((pte & PTE_W) == 0) 251 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 252 1.38 maxv if (pte & PTE_NX) 253 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 254 1.38 maxv if (pte & PTE_PS) 255 1.1 maxv return -1; 256 1.1 maxv 257 1.38 maxv *gpa = (pte & PTE_FRAME); 258 1.1 maxv return 0; 259 1.1 maxv } 260 1.1 maxv 261 1.1 maxv /* -------------------------------------------------------------------------- */ 262 1.1 maxv 263 1.1 maxv #define PTE64_L1_SHIFT 12 264 1.1 maxv #define PTE64_L2_SHIFT 21 265 1.1 maxv #define PTE64_L3_SHIFT 30 266 1.1 maxv #define PTE64_L4_SHIFT 39 267 1.1 maxv 268 1.1 maxv #define PTE64_L4_MASK 0x0000ff8000000000 269 1.1 maxv #define PTE64_L3_MASK 0x0000007fc0000000 270 1.1 maxv #define PTE64_L2_MASK 0x000000003fe00000 271 1.1 maxv #define PTE64_L1_MASK 0x00000000001ff000 272 1.1 maxv 273 1.1 maxv #define PTE64_L4_FRAME PTE64_L4_MASK 274 1.1 maxv #define PTE64_L3_FRAME (PTE64_L4_FRAME|PTE64_L3_MASK) 275 1.1 maxv #define PTE64_L2_FRAME (PTE64_L3_FRAME|PTE64_L2_MASK) 276 1.1 maxv #define PTE64_L1_FRAME (PTE64_L2_FRAME|PTE64_L1_MASK) 277 1.1 maxv 278 1.1 maxv #define pte64_l1idx(va) (((va) & PTE64_L1_MASK) >> PTE64_L1_SHIFT) 279 1.1 maxv #define pte64_l2idx(va) (((va) & PTE64_L2_MASK) >> PTE64_L2_SHIFT) 280 1.1 maxv #define pte64_l3idx(va) (((va) & PTE64_L3_MASK) >> PTE64_L3_SHIFT) 281 1.1 maxv #define pte64_l4idx(va) (((va) & PTE64_L4_MASK) >> PTE64_L4_SHIFT) 282 1.1 maxv 283 1.38 maxv #define CR3_FRAME_64BIT __BITS(51, 12) 284 1.19 maxv 285 1.1 maxv typedef uint64_t pte_64bit_t; 286 1.1 maxv 287 1.1 maxv static inline bool 288 1.1 maxv x86_gva_64bit_canonical(gvaddr_t gva) 289 1.1 
maxv { 290 1.1 maxv /* Bits 63:47 must have the same value. */ 291 1.1 maxv #define SIGN_EXTEND 0xffff800000000000ULL 292 1.1 maxv return (gva & SIGN_EXTEND) == 0 || (gva & SIGN_EXTEND) == SIGN_EXTEND; 293 1.1 maxv } 294 1.1 maxv 295 1.1 maxv static int 296 1.1 maxv x86_gva_to_gpa_64bit(struct nvmm_machine *mach, uint64_t cr3, 297 1.11 maxv gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot) 298 1.1 maxv { 299 1.1 maxv gpaddr_t L4gpa, L3gpa, L2gpa, L1gpa; 300 1.1 maxv uintptr_t L4hva, L3hva, L2hva, L1hva; 301 1.1 maxv pte_64bit_t *pdir, pte; 302 1.28 maxv nvmm_prot_t pageprot; 303 1.1 maxv 304 1.1 maxv /* We begin with an RWXU access. */ 305 1.1 maxv *prot = NVMM_PROT_ALL; 306 1.1 maxv 307 1.1 maxv if (!x86_gva_64bit_canonical(gva)) 308 1.1 maxv return -1; 309 1.1 maxv 310 1.1 maxv /* Parse L4. */ 311 1.19 maxv L4gpa = (cr3 & CR3_FRAME_64BIT); 312 1.28 maxv if (nvmm_gpa_to_hva(mach, L4gpa, &L4hva, &pageprot) == -1) 313 1.1 maxv return -1; 314 1.1 maxv pdir = (pte_64bit_t *)L4hva; 315 1.1 maxv pte = pdir[pte64_l4idx(gva)]; 316 1.38 maxv if ((pte & PTE_P) == 0) 317 1.1 maxv return -1; 318 1.38 maxv if ((pte & PTE_U) == 0) 319 1.1 maxv *prot &= ~NVMM_PROT_USER; 320 1.38 maxv if ((pte & PTE_W) == 0) 321 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 322 1.38 maxv if (pte & PTE_NX) 323 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 324 1.38 maxv if (pte & PTE_PS) 325 1.1 maxv return -1; 326 1.1 maxv 327 1.1 maxv /* Parse L3. 
*/ 328 1.38 maxv L3gpa = (pte & PTE_FRAME); 329 1.28 maxv if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva, &pageprot) == -1) 330 1.1 maxv return -1; 331 1.1 maxv pdir = (pte_64bit_t *)L3hva; 332 1.1 maxv pte = pdir[pte64_l3idx(gva)]; 333 1.38 maxv if ((pte & PTE_P) == 0) 334 1.1 maxv return -1; 335 1.38 maxv if ((pte & PTE_U) == 0) 336 1.1 maxv *prot &= ~NVMM_PROT_USER; 337 1.38 maxv if ((pte & PTE_W) == 0) 338 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 339 1.38 maxv if (pte & PTE_NX) 340 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 341 1.38 maxv if (pte & PTE_PS) { 342 1.1 maxv *gpa = (pte & PTE64_L3_FRAME); 343 1.10 maxv *gpa = *gpa + (gva & (PTE64_L2_MASK|PTE64_L1_MASK)); 344 1.1 maxv return 0; 345 1.1 maxv } 346 1.1 maxv 347 1.1 maxv /* Parse L2. */ 348 1.38 maxv L2gpa = (pte & PTE_FRAME); 349 1.28 maxv if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva, &pageprot) == -1) 350 1.1 maxv return -1; 351 1.1 maxv pdir = (pte_64bit_t *)L2hva; 352 1.1 maxv pte = pdir[pte64_l2idx(gva)]; 353 1.38 maxv if ((pte & PTE_P) == 0) 354 1.1 maxv return -1; 355 1.38 maxv if ((pte & PTE_U) == 0) 356 1.1 maxv *prot &= ~NVMM_PROT_USER; 357 1.38 maxv if ((pte & PTE_W) == 0) 358 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 359 1.38 maxv if (pte & PTE_NX) 360 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 361 1.38 maxv if (pte & PTE_PS) { 362 1.1 maxv *gpa = (pte & PTE64_L2_FRAME); 363 1.10 maxv *gpa = *gpa + (gva & PTE64_L1_MASK); 364 1.1 maxv return 0; 365 1.1 maxv } 366 1.1 maxv 367 1.1 maxv /* Parse L1. 
*/ 368 1.38 maxv L1gpa = (pte & PTE_FRAME); 369 1.28 maxv if (nvmm_gpa_to_hva(mach, L1gpa, &L1hva, &pageprot) == -1) 370 1.1 maxv return -1; 371 1.1 maxv pdir = (pte_64bit_t *)L1hva; 372 1.1 maxv pte = pdir[pte64_l1idx(gva)]; 373 1.38 maxv if ((pte & PTE_P) == 0) 374 1.1 maxv return -1; 375 1.38 maxv if ((pte & PTE_U) == 0) 376 1.1 maxv *prot &= ~NVMM_PROT_USER; 377 1.38 maxv if ((pte & PTE_W) == 0) 378 1.1 maxv *prot &= ~NVMM_PROT_WRITE; 379 1.38 maxv if (pte & PTE_NX) 380 1.1 maxv *prot &= ~NVMM_PROT_EXEC; 381 1.38 maxv if (pte & PTE_PS) 382 1.1 maxv return -1; 383 1.1 maxv 384 1.38 maxv *gpa = (pte & PTE_FRAME); 385 1.1 maxv return 0; 386 1.1 maxv } 387 1.1 maxv 388 1.1 maxv static inline int 389 1.1 maxv x86_gva_to_gpa(struct nvmm_machine *mach, struct nvmm_x64_state *state, 390 1.1 maxv gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot) 391 1.1 maxv { 392 1.1 maxv bool is_pae, is_lng, has_pse; 393 1.1 maxv uint64_t cr3; 394 1.6 maxv size_t off; 395 1.1 maxv int ret; 396 1.1 maxv 397 1.1 maxv if ((state->crs[NVMM_X64_CR_CR0] & CR0_PG) == 0) { 398 1.1 maxv /* No paging. 
*/ 399 1.4 maxv *prot = NVMM_PROT_ALL; 400 1.1 maxv *gpa = gva; 401 1.1 maxv return 0; 402 1.1 maxv } 403 1.1 maxv 404 1.6 maxv off = (gva & PAGE_MASK); 405 1.6 maxv gva &= ~PAGE_MASK; 406 1.6 maxv 407 1.1 maxv is_pae = (state->crs[NVMM_X64_CR_CR4] & CR4_PAE) != 0; 408 1.15 maxv is_lng = (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) != 0; 409 1.1 maxv has_pse = (state->crs[NVMM_X64_CR_CR4] & CR4_PSE) != 0; 410 1.1 maxv cr3 = state->crs[NVMM_X64_CR_CR3]; 411 1.1 maxv 412 1.1 maxv if (is_pae && is_lng) { 413 1.1 maxv /* 64bit */ 414 1.11 maxv ret = x86_gva_to_gpa_64bit(mach, cr3, gva, gpa, prot); 415 1.1 maxv } else if (is_pae && !is_lng) { 416 1.1 maxv /* 32bit PAE */ 417 1.23 maxv ret = x86_gva_to_gpa_32bit_pae(mach, cr3, gva, gpa, prot); 418 1.1 maxv } else if (!is_pae && !is_lng) { 419 1.1 maxv /* 32bit */ 420 1.1 maxv ret = x86_gva_to_gpa_32bit(mach, cr3, gva, gpa, has_pse, prot); 421 1.1 maxv } else { 422 1.1 maxv ret = -1; 423 1.1 maxv } 424 1.1 maxv 425 1.1 maxv if (ret == -1) { 426 1.1 maxv errno = EFAULT; 427 1.1 maxv } 428 1.1 maxv 429 1.6 maxv *gpa = *gpa + off; 430 1.6 maxv 431 1.1 maxv return ret; 432 1.1 maxv } 433 1.1 maxv 434 1.1 maxv int 435 1.31 maxv nvmm_gva_to_gpa(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 436 1.1 maxv gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot) 437 1.1 maxv { 438 1.31 maxv struct nvmm_x64_state *state = vcpu->state; 439 1.1 maxv int ret; 440 1.1 maxv 441 1.31 maxv ret = nvmm_vcpu_getstate(mach, vcpu, 442 1.1 maxv NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 443 1.1 maxv if (ret == -1) 444 1.1 maxv return -1; 445 1.1 maxv 446 1.31 maxv return x86_gva_to_gpa(mach, state, gva, gpa, prot); 447 1.1 maxv } 448 1.1 maxv 449 1.1 maxv /* -------------------------------------------------------------------------- */ 450 1.1 maxv 451 1.32 maxv #define DISASSEMBLER_BUG() \ 452 1.32 maxv do { \ 453 1.32 maxv errno = EINVAL; \ 454 1.32 maxv return -1; \ 455 1.32 maxv } while (0); 456 1.32 maxv 457 1.1 maxv static inline bool 458 1.15 
maxv is_long_mode(struct nvmm_x64_state *state) 459 1.15 maxv { 460 1.15 maxv return (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) != 0; 461 1.15 maxv } 462 1.15 maxv 463 1.15 maxv static inline bool 464 1.5 maxv is_64bit(struct nvmm_x64_state *state) 465 1.5 maxv { 466 1.26 maxv return (state->segs[NVMM_X64_SEG_CS].attrib.l != 0); 467 1.5 maxv } 468 1.5 maxv 469 1.5 maxv static inline bool 470 1.5 maxv is_32bit(struct nvmm_x64_state *state) 471 1.5 maxv { 472 1.26 maxv return (state->segs[NVMM_X64_SEG_CS].attrib.l == 0) && 473 1.26 maxv (state->segs[NVMM_X64_SEG_CS].attrib.def == 1); 474 1.5 maxv } 475 1.5 maxv 476 1.5 maxv static inline bool 477 1.5 maxv is_16bit(struct nvmm_x64_state *state) 478 1.5 maxv { 479 1.26 maxv return (state->segs[NVMM_X64_SEG_CS].attrib.l == 0) && 480 1.26 maxv (state->segs[NVMM_X64_SEG_CS].attrib.def == 0); 481 1.5 maxv } 482 1.5 maxv 483 1.1 maxv static int 484 1.15 maxv segment_check(struct nvmm_x64_state_seg *seg, gvaddr_t gva, size_t size) 485 1.1 maxv { 486 1.1 maxv uint64_t limit; 487 1.1 maxv 488 1.1 maxv /* 489 1.1 maxv * This is incomplete. We should check topdown, etc, really that's 490 1.1 maxv * tiring. 
491 1.1 maxv */ 492 1.1 maxv if (__predict_false(!seg->attrib.p)) { 493 1.1 maxv goto error; 494 1.1 maxv } 495 1.1 maxv 496 1.26 maxv limit = (uint64_t)seg->limit + 1; 497 1.26 maxv if (__predict_true(seg->attrib.g)) { 498 1.1 maxv limit *= PAGE_SIZE; 499 1.1 maxv } 500 1.1 maxv 501 1.15 maxv if (__predict_false(gva + size > limit)) { 502 1.1 maxv goto error; 503 1.1 maxv } 504 1.1 maxv 505 1.1 maxv return 0; 506 1.1 maxv 507 1.1 maxv error: 508 1.1 maxv errno = EFAULT; 509 1.1 maxv return -1; 510 1.1 maxv } 511 1.1 maxv 512 1.15 maxv static inline void 513 1.15 maxv segment_apply(struct nvmm_x64_state_seg *seg, gvaddr_t *gva) 514 1.15 maxv { 515 1.15 maxv *gva += seg->base; 516 1.15 maxv } 517 1.15 maxv 518 1.15 maxv static inline uint64_t 519 1.15 maxv size_to_mask(size_t size) 520 1.6 maxv { 521 1.15 maxv switch (size) { 522 1.15 maxv case 1: 523 1.15 maxv return 0x00000000000000FF; 524 1.15 maxv case 2: 525 1.15 maxv return 0x000000000000FFFF; 526 1.15 maxv case 4: 527 1.15 maxv return 0x00000000FFFFFFFF; 528 1.6 maxv case 8: 529 1.15 maxv default: 530 1.6 maxv return 0xFFFFFFFFFFFFFFFF; 531 1.6 maxv } 532 1.6 maxv } 533 1.6 maxv 534 1.6 maxv static uint64_t 535 1.10 maxv rep_get_cnt(struct nvmm_x64_state *state, size_t adsize) 536 1.10 maxv { 537 1.10 maxv uint64_t mask, cnt; 538 1.10 maxv 539 1.15 maxv mask = size_to_mask(adsize); 540 1.10 maxv cnt = state->gprs[NVMM_X64_GPR_RCX] & mask; 541 1.10 maxv 542 1.10 maxv return cnt; 543 1.10 maxv } 544 1.10 maxv 545 1.10 maxv static void 546 1.10 maxv rep_set_cnt(struct nvmm_x64_state *state, size_t adsize, uint64_t cnt) 547 1.10 maxv { 548 1.10 maxv uint64_t mask; 549 1.10 maxv 550 1.15 maxv /* XXX: should we zero-extend? 
*/ 551 1.15 maxv mask = size_to_mask(adsize); 552 1.10 maxv state->gprs[NVMM_X64_GPR_RCX] &= ~mask; 553 1.10 maxv state->gprs[NVMM_X64_GPR_RCX] |= cnt; 554 1.10 maxv } 555 1.10 maxv 556 1.6 maxv static int 557 1.37 maxv read_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 558 1.6 maxv gvaddr_t gva, uint8_t *data, size_t size) 559 1.6 maxv { 560 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 561 1.6 maxv struct nvmm_mem mem; 562 1.6 maxv nvmm_prot_t prot; 563 1.6 maxv gpaddr_t gpa; 564 1.6 maxv uintptr_t hva; 565 1.6 maxv bool is_mmio; 566 1.6 maxv int ret, remain; 567 1.6 maxv 568 1.6 maxv ret = x86_gva_to_gpa(mach, state, gva, &gpa, &prot); 569 1.6 maxv if (__predict_false(ret == -1)) { 570 1.6 maxv return -1; 571 1.6 maxv } 572 1.6 maxv if (__predict_false(!(prot & NVMM_PROT_READ))) { 573 1.6 maxv errno = EFAULT; 574 1.6 maxv return -1; 575 1.6 maxv } 576 1.6 maxv 577 1.6 maxv if ((gva & PAGE_MASK) + size > PAGE_SIZE) { 578 1.6 maxv remain = ((gva & PAGE_MASK) + size - PAGE_SIZE); 579 1.6 maxv } else { 580 1.6 maxv remain = 0; 581 1.6 maxv } 582 1.6 maxv size -= remain; 583 1.6 maxv 584 1.28 maxv ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot); 585 1.6 maxv is_mmio = (ret == -1); 586 1.6 maxv 587 1.6 maxv if (is_mmio) { 588 1.37 maxv mem.mach = mach; 589 1.37 maxv mem.vcpu = vcpu; 590 1.11 maxv mem.data = data; 591 1.6 maxv mem.gpa = gpa; 592 1.6 maxv mem.write = false; 593 1.6 maxv mem.size = size; 594 1.37 maxv (*vcpu->cbs.mem)(&mem); 595 1.6 maxv } else { 596 1.28 maxv if (__predict_false(!(prot & NVMM_PROT_READ))) { 597 1.28 maxv errno = EFAULT; 598 1.28 maxv return -1; 599 1.28 maxv } 600 1.6 maxv memcpy(data, (uint8_t *)hva, size); 601 1.6 maxv } 602 1.6 maxv 603 1.6 maxv if (remain > 0) { 604 1.37 maxv ret = read_guest_memory(mach, vcpu, gva + size, 605 1.6 maxv data + size, remain); 606 1.6 maxv } else { 607 1.6 maxv ret = 0; 608 1.6 maxv } 609 1.6 maxv 610 1.6 maxv return ret; 611 1.6 maxv } 612 1.6 maxv 613 1.6 maxv static int 614 
1.37 maxv write_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 615 1.6 maxv gvaddr_t gva, uint8_t *data, size_t size) 616 1.6 maxv { 617 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 618 1.6 maxv struct nvmm_mem mem; 619 1.6 maxv nvmm_prot_t prot; 620 1.6 maxv gpaddr_t gpa; 621 1.6 maxv uintptr_t hva; 622 1.6 maxv bool is_mmio; 623 1.6 maxv int ret, remain; 624 1.6 maxv 625 1.6 maxv ret = x86_gva_to_gpa(mach, state, gva, &gpa, &prot); 626 1.6 maxv if (__predict_false(ret == -1)) { 627 1.6 maxv return -1; 628 1.6 maxv } 629 1.6 maxv if (__predict_false(!(prot & NVMM_PROT_WRITE))) { 630 1.6 maxv errno = EFAULT; 631 1.6 maxv return -1; 632 1.6 maxv } 633 1.6 maxv 634 1.6 maxv if ((gva & PAGE_MASK) + size > PAGE_SIZE) { 635 1.6 maxv remain = ((gva & PAGE_MASK) + size - PAGE_SIZE); 636 1.6 maxv } else { 637 1.6 maxv remain = 0; 638 1.6 maxv } 639 1.6 maxv size -= remain; 640 1.6 maxv 641 1.28 maxv ret = nvmm_gpa_to_hva(mach, gpa, &hva, &prot); 642 1.6 maxv is_mmio = (ret == -1); 643 1.6 maxv 644 1.6 maxv if (is_mmio) { 645 1.37 maxv mem.mach = mach; 646 1.37 maxv mem.vcpu = vcpu; 647 1.11 maxv mem.data = data; 648 1.6 maxv mem.gpa = gpa; 649 1.6 maxv mem.write = true; 650 1.6 maxv mem.size = size; 651 1.37 maxv (*vcpu->cbs.mem)(&mem); 652 1.6 maxv } else { 653 1.28 maxv if (__predict_false(!(prot & NVMM_PROT_WRITE))) { 654 1.28 maxv errno = EFAULT; 655 1.28 maxv return -1; 656 1.28 maxv } 657 1.6 maxv memcpy((uint8_t *)hva, data, size); 658 1.6 maxv } 659 1.6 maxv 660 1.6 maxv if (remain > 0) { 661 1.37 maxv ret = write_guest_memory(mach, vcpu, gva + size, 662 1.6 maxv data + size, remain); 663 1.6 maxv } else { 664 1.6 maxv ret = 0; 665 1.6 maxv } 666 1.6 maxv 667 1.6 maxv return ret; 668 1.6 maxv } 669 1.6 maxv 670 1.6 maxv /* -------------------------------------------------------------------------- */ 671 1.6 maxv 672 1.37 maxv static int fetch_segment(struct nvmm_machine *, struct nvmm_vcpu *); 673 1.8 maxv 674 1.10 maxv #define 
NVMM_IO_BATCH_SIZE 32 675 1.10 maxv 676 1.10 maxv static int 677 1.37 maxv assist_io_batch(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 678 1.10 maxv struct nvmm_io *io, gvaddr_t gva, uint64_t cnt) 679 1.10 maxv { 680 1.10 maxv uint8_t iobuf[NVMM_IO_BATCH_SIZE]; 681 1.10 maxv size_t i, iosize, iocnt; 682 1.10 maxv int ret; 683 1.10 maxv 684 1.10 maxv cnt = MIN(cnt, NVMM_IO_BATCH_SIZE); 685 1.10 maxv iosize = MIN(io->size * cnt, NVMM_IO_BATCH_SIZE); 686 1.10 maxv iocnt = iosize / io->size; 687 1.10 maxv 688 1.10 maxv io->data = iobuf; 689 1.10 maxv 690 1.10 maxv if (!io->in) { 691 1.37 maxv ret = read_guest_memory(mach, vcpu, gva, iobuf, iosize); 692 1.10 maxv if (ret == -1) 693 1.10 maxv return -1; 694 1.10 maxv } 695 1.10 maxv 696 1.10 maxv for (i = 0; i < iocnt; i++) { 697 1.37 maxv (*vcpu->cbs.io)(io); 698 1.10 maxv io->data += io->size; 699 1.10 maxv } 700 1.10 maxv 701 1.10 maxv if (io->in) { 702 1.37 maxv ret = write_guest_memory(mach, vcpu, gva, iobuf, iosize); 703 1.10 maxv if (ret == -1) 704 1.10 maxv return -1; 705 1.10 maxv } 706 1.10 maxv 707 1.10 maxv return iocnt; 708 1.10 maxv } 709 1.10 maxv 710 1.1 maxv int 711 1.31 maxv nvmm_assist_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) 712 1.1 maxv { 713 1.31 maxv struct nvmm_x64_state *state = vcpu->state; 714 1.36 maxv struct nvmm_vcpu_exit *exit = vcpu->exit; 715 1.1 maxv struct nvmm_io io; 716 1.10 maxv uint64_t cnt = 0; /* GCC */ 717 1.10 maxv uint8_t iobuf[8]; 718 1.10 maxv int iocnt = 1; 719 1.15 maxv gvaddr_t gva = 0; /* GCC */ 720 1.5 maxv int reg = 0; /* GCC */ 721 1.8 maxv int ret, seg; 722 1.10 maxv bool psld = false; 723 1.1 maxv 724 1.36 maxv if (__predict_false(exit->reason != NVMM_VCPU_EXIT_IO)) { 725 1.1 maxv errno = EINVAL; 726 1.1 maxv return -1; 727 1.1 maxv } 728 1.1 maxv 729 1.37 maxv io.mach = mach; 730 1.37 maxv io.vcpu = vcpu; 731 1.1 maxv io.port = exit->u.io.port; 732 1.36 maxv io.in = exit->u.io.in; 733 1.1 maxv io.size = exit->u.io.operand_size; 734 1.10 maxv 
io.data = iobuf; 735 1.1 maxv 736 1.31 maxv ret = nvmm_vcpu_getstate(mach, vcpu, 737 1.1 maxv NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 738 1.1 maxv NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 739 1.1 maxv if (ret == -1) 740 1.1 maxv return -1; 741 1.1 maxv 742 1.10 maxv if (exit->u.io.rep) { 743 1.31 maxv cnt = rep_get_cnt(state, exit->u.io.address_size); 744 1.10 maxv if (__predict_false(cnt == 0)) { 745 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc; 746 1.15 maxv goto out; 747 1.10 maxv } 748 1.10 maxv } 749 1.10 maxv 750 1.31 maxv if (__predict_false(state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_D)) { 751 1.10 maxv psld = true; 752 1.10 maxv } 753 1.10 maxv 754 1.6 maxv /* 755 1.6 maxv * Determine GVA. 756 1.6 maxv */ 757 1.6 maxv if (exit->u.io.str) { 758 1.5 maxv if (io.in) { 759 1.5 maxv reg = NVMM_X64_GPR_RDI; 760 1.5 maxv } else { 761 1.5 maxv reg = NVMM_X64_GPR_RSI; 762 1.5 maxv } 763 1.1 maxv 764 1.31 maxv gva = state->gprs[reg]; 765 1.15 maxv gva &= size_to_mask(exit->u.io.address_size); 766 1.1 maxv 767 1.15 maxv if (exit->u.io.seg != -1) { 768 1.15 maxv seg = exit->u.io.seg; 769 1.15 maxv } else { 770 1.15 maxv if (io.in) { 771 1.15 maxv seg = NVMM_X64_SEG_ES; 772 1.8 maxv } else { 773 1.37 maxv seg = fetch_segment(mach, vcpu); 774 1.15 maxv if (seg == -1) 775 1.15 maxv return -1; 776 1.8 maxv } 777 1.15 maxv } 778 1.8 maxv 779 1.31 maxv if (__predict_true(is_long_mode(state))) { 780 1.15 maxv if (seg == NVMM_X64_SEG_GS || seg == NVMM_X64_SEG_FS) { 781 1.31 maxv segment_apply(&state->segs[seg], &gva); 782 1.15 maxv } 783 1.15 maxv } else { 784 1.31 maxv ret = segment_check(&state->segs[seg], gva, io.size); 785 1.1 maxv if (ret == -1) 786 1.1 maxv return -1; 787 1.31 maxv segment_apply(&state->segs[seg], &gva); 788 1.1 maxv } 789 1.10 maxv 790 1.10 maxv if (exit->u.io.rep && !psld) { 791 1.37 maxv iocnt = assist_io_batch(mach, vcpu, &io, gva, cnt); 792 1.10 maxv if (iocnt == -1) 793 1.10 maxv return -1; 794 1.10 maxv goto done; 795 1.10 maxv } 796 
1.6 maxv } 797 1.1 maxv 798 1.6 maxv if (!io.in) { 799 1.6 maxv if (!exit->u.io.str) { 800 1.31 maxv memcpy(io.data, &state->gprs[NVMM_X64_GPR_RAX], io.size); 801 1.6 maxv } else { 802 1.37 maxv ret = read_guest_memory(mach, vcpu, gva, io.data, 803 1.6 maxv io.size); 804 1.1 maxv if (ret == -1) 805 1.1 maxv return -1; 806 1.1 maxv } 807 1.1 maxv } 808 1.1 maxv 809 1.37 maxv (*vcpu->cbs.io)(&io); 810 1.1 maxv 811 1.1 maxv if (io.in) { 812 1.6 maxv if (!exit->u.io.str) { 813 1.31 maxv memcpy(&state->gprs[NVMM_X64_GPR_RAX], io.data, io.size); 814 1.15 maxv if (io.size == 4) { 815 1.15 maxv /* Zero-extend to 64 bits. */ 816 1.31 maxv state->gprs[NVMM_X64_GPR_RAX] &= size_to_mask(4); 817 1.15 maxv } 818 1.1 maxv } else { 819 1.37 maxv ret = write_guest_memory(mach, vcpu, gva, io.data, 820 1.6 maxv io.size); 821 1.6 maxv if (ret == -1) 822 1.6 maxv return -1; 823 1.1 maxv } 824 1.1 maxv } 825 1.1 maxv 826 1.10 maxv done: 827 1.5 maxv if (exit->u.io.str) { 828 1.10 maxv if (__predict_false(psld)) { 829 1.31 maxv state->gprs[reg] -= iocnt * io.size; 830 1.5 maxv } else { 831 1.31 maxv state->gprs[reg] += iocnt * io.size; 832 1.5 maxv } 833 1.5 maxv } 834 1.5 maxv 835 1.1 maxv if (exit->u.io.rep) { 836 1.10 maxv cnt -= iocnt; 837 1.31 maxv rep_set_cnt(state, exit->u.io.address_size, cnt); 838 1.6 maxv if (cnt == 0) { 839 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc; 840 1.1 maxv } 841 1.1 maxv } else { 842 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc; 843 1.1 maxv } 844 1.1 maxv 845 1.15 maxv out: 846 1.31 maxv ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS); 847 1.1 maxv if (ret == -1) 848 1.1 maxv return -1; 849 1.1 maxv 850 1.1 maxv return 0; 851 1.1 maxv } 852 1.1 maxv 853 1.1 maxv /* -------------------------------------------------------------------------- */ 854 1.1 maxv 855 1.19 maxv struct x86_emul { 856 1.33 maxv bool readreg; 857 1.33 maxv bool backprop; 858 1.19 maxv bool notouch; 859 1.37 maxv void (*func)(struct nvmm_vcpu *, 
struct nvmm_mem *, uint64_t *); 860 1.19 maxv }; 861 1.19 maxv 862 1.37 maxv static void x86_func_or(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 863 1.37 maxv static void x86_func_and(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 864 1.37 maxv static void x86_func_xchg(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 865 1.37 maxv static void x86_func_sub(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 866 1.37 maxv static void x86_func_xor(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 867 1.37 maxv static void x86_func_cmp(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 868 1.37 maxv static void x86_func_test(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 869 1.37 maxv static void x86_func_mov(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 870 1.37 maxv static void x86_func_stos(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 871 1.37 maxv static void x86_func_lods(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *); 872 1.19 maxv 873 1.19 maxv static const struct x86_emul x86_emul_or = { 874 1.33 maxv .readreg = true, 875 1.19 maxv .func = x86_func_or 876 1.19 maxv }; 877 1.19 maxv 878 1.19 maxv static const struct x86_emul x86_emul_and = { 879 1.33 maxv .readreg = true, 880 1.19 maxv .func = x86_func_and 881 1.19 maxv }; 882 1.19 maxv 883 1.33 maxv static const struct x86_emul x86_emul_xchg = { 884 1.33 maxv .readreg = true, 885 1.33 maxv .backprop = true, 886 1.33 maxv .func = x86_func_xchg 887 1.33 maxv }; 888 1.33 maxv 889 1.19 maxv static const struct x86_emul x86_emul_sub = { 890 1.33 maxv .readreg = true, 891 1.19 maxv .func = x86_func_sub 892 1.19 maxv }; 893 1.19 maxv 894 1.19 maxv static const struct x86_emul x86_emul_xor = { 895 1.33 maxv .readreg = true, 896 1.19 maxv .func = x86_func_xor 897 1.19 maxv }; 898 1.19 maxv 899 1.19 maxv static const struct x86_emul x86_emul_cmp = { 900 1.19 maxv .notouch = true, 901 1.19 maxv .func = x86_func_cmp 902 1.19 maxv }; 903 1.19 maxv 904 1.19 maxv static const struct 
x86_emul x86_emul_test = { 905 1.19 maxv .notouch = true, 906 1.19 maxv .func = x86_func_test 907 1.19 maxv }; 908 1.19 maxv 909 1.19 maxv static const struct x86_emul x86_emul_mov = { 910 1.19 maxv .func = x86_func_mov 911 1.19 maxv }; 912 1.19 maxv 913 1.19 maxv static const struct x86_emul x86_emul_stos = { 914 1.19 maxv .func = x86_func_stos 915 1.19 maxv }; 916 1.19 maxv 917 1.19 maxv static const struct x86_emul x86_emul_lods = { 918 1.19 maxv .func = x86_func_lods 919 1.19 maxv }; 920 1.19 maxv 921 1.13 maxv /* Legacy prefixes. */ 922 1.13 maxv #define LEG_LOCK 0xF0 923 1.13 maxv #define LEG_REPN 0xF2 924 1.13 maxv #define LEG_REP 0xF3 925 1.13 maxv #define LEG_OVR_CS 0x2E 926 1.13 maxv #define LEG_OVR_SS 0x36 927 1.13 maxv #define LEG_OVR_DS 0x3E 928 1.13 maxv #define LEG_OVR_ES 0x26 929 1.13 maxv #define LEG_OVR_FS 0x64 930 1.13 maxv #define LEG_OVR_GS 0x65 931 1.13 maxv #define LEG_OPR_OVR 0x66 932 1.13 maxv #define LEG_ADR_OVR 0x67 933 1.13 maxv 934 1.13 maxv struct x86_legpref { 935 1.13 maxv bool opr_ovr:1; 936 1.13 maxv bool adr_ovr:1; 937 1.13 maxv bool rep:1; 938 1.13 maxv bool repn:1; 939 1.43 reinoud bool repe:1; 940 1.27 maxv int8_t seg; 941 1.5 maxv }; 942 1.5 maxv 943 1.5 maxv struct x86_rexpref { 944 1.27 maxv bool b:1; 945 1.27 maxv bool x:1; 946 1.27 maxv bool r:1; 947 1.27 maxv bool w:1; 948 1.27 maxv bool present:1; 949 1.5 maxv }; 950 1.5 maxv 951 1.5 maxv struct x86_reg { 952 1.5 maxv int num; /* NVMM GPR state index */ 953 1.5 maxv uint64_t mask; 954 1.5 maxv }; 955 1.5 maxv 956 1.32 maxv struct x86_dualreg { 957 1.32 maxv int reg1; 958 1.32 maxv int reg2; 959 1.32 maxv }; 960 1.32 maxv 961 1.5 maxv enum x86_disp_type { 962 1.5 maxv DISP_NONE, 963 1.5 maxv DISP_0, 964 1.5 maxv DISP_1, 965 1.32 maxv DISP_2, 966 1.5 maxv DISP_4 967 1.5 maxv }; 968 1.5 maxv 969 1.5 maxv struct x86_disp { 970 1.5 maxv enum x86_disp_type type; 971 1.11 maxv uint64_t data; /* 4 bytes, but can be sign-extended */ 972 1.5 maxv }; 973 1.5 maxv 974 1.5 maxv 
struct x86_regmodrm { 975 1.27 maxv uint8_t mod:2; 976 1.27 maxv uint8_t reg:3; 977 1.27 maxv uint8_t rm:3; 978 1.5 maxv }; 979 1.5 maxv 980 1.5 maxv struct x86_immediate { 981 1.11 maxv uint64_t data; 982 1.5 maxv }; 983 1.5 maxv 984 1.5 maxv struct x86_sib { 985 1.5 maxv uint8_t scale; 986 1.5 maxv const struct x86_reg *idx; 987 1.5 maxv const struct x86_reg *bas; 988 1.5 maxv }; 989 1.5 maxv 990 1.5 maxv enum x86_store_type { 991 1.5 maxv STORE_NONE, 992 1.5 maxv STORE_REG, 993 1.32 maxv STORE_DUALREG, 994 1.5 maxv STORE_IMM, 995 1.5 maxv STORE_SIB, 996 1.5 maxv STORE_DMO 997 1.5 maxv }; 998 1.5 maxv 999 1.5 maxv struct x86_store { 1000 1.5 maxv enum x86_store_type type; 1001 1.5 maxv union { 1002 1.5 maxv const struct x86_reg *reg; 1003 1.32 maxv struct x86_dualreg dualreg; 1004 1.5 maxv struct x86_immediate imm; 1005 1.5 maxv struct x86_sib sib; 1006 1.5 maxv uint64_t dmo; 1007 1.5 maxv } u; 1008 1.5 maxv struct x86_disp disp; 1009 1.6 maxv int hardseg; 1010 1.5 maxv }; 1011 1.5 maxv 1012 1.5 maxv struct x86_instr { 1013 1.27 maxv uint8_t len; 1014 1.13 maxv struct x86_legpref legpref; 1015 1.5 maxv struct x86_rexpref rexpref; 1016 1.27 maxv struct x86_regmodrm regmodrm; 1017 1.27 maxv uint8_t operand_size; 1018 1.27 maxv uint8_t address_size; 1019 1.10 maxv uint64_t zeroextend_mask; 1020 1.5 maxv 1021 1.5 maxv const struct x86_opcode *opcode; 1022 1.27 maxv const struct x86_emul *emul; 1023 1.5 maxv 1024 1.5 maxv struct x86_store src; 1025 1.5 maxv struct x86_store dst; 1026 1.5 maxv struct x86_store *strm; 1027 1.5 maxv }; 1028 1.5 maxv 1029 1.5 maxv struct x86_decode_fsm { 1030 1.5 maxv /* vcpu */ 1031 1.5 maxv bool is64bit; 1032 1.5 maxv bool is32bit; 1033 1.5 maxv bool is16bit; 1034 1.5 maxv 1035 1.5 maxv /* fsm */ 1036 1.5 maxv int (*fn)(struct x86_decode_fsm *, struct x86_instr *); 1037 1.5 maxv uint8_t *buf; 1038 1.5 maxv uint8_t *end; 1039 1.5 maxv }; 1040 1.5 maxv 1041 1.5 maxv struct x86_opcode { 1042 1.27 maxv bool valid:1; 1043 1.27 maxv bool 
regmodrm:1; 1044 1.27 maxv bool regtorm:1; 1045 1.27 maxv bool dmo:1; 1046 1.27 maxv bool todmo:1; 1047 1.27 maxv bool movs:1; 1048 1.43 reinoud bool cmps:1; 1049 1.27 maxv bool stos:1; 1050 1.27 maxv bool lods:1; 1051 1.27 maxv bool szoverride:1; 1052 1.27 maxv bool group1:1; 1053 1.27 maxv bool group3:1; 1054 1.27 maxv bool group11:1; 1055 1.27 maxv bool immediate:1; 1056 1.27 maxv uint8_t defsize; 1057 1.27 maxv uint8_t flags; 1058 1.19 maxv const struct x86_emul *emul; 1059 1.5 maxv }; 1060 1.5 maxv 1061 1.5 maxv struct x86_group_entry { 1062 1.19 maxv const struct x86_emul *emul; 1063 1.5 maxv }; 1064 1.5 maxv 1065 1.5 maxv #define OPSIZE_BYTE 0x01 1066 1.5 maxv #define OPSIZE_WORD 0x02 /* 2 bytes */ 1067 1.5 maxv #define OPSIZE_DOUB 0x04 /* 4 bytes */ 1068 1.5 maxv #define OPSIZE_QUAD 0x08 /* 8 bytes */ 1069 1.5 maxv 1070 1.11 maxv #define FLAG_imm8 0x01 1071 1.11 maxv #define FLAG_immz 0x02 1072 1.11 maxv #define FLAG_ze 0x04 1073 1.11 maxv 1074 1.27 maxv static const struct x86_group_entry group1[8] __cacheline_aligned = { 1075 1.19 maxv [1] = { .emul = &x86_emul_or }, 1076 1.19 maxv [4] = { .emul = &x86_emul_and }, 1077 1.19 maxv [6] = { .emul = &x86_emul_xor }, 1078 1.19 maxv [7] = { .emul = &x86_emul_cmp } 1079 1.19 maxv }; 1080 1.19 maxv 1081 1.27 maxv static const struct x86_group_entry group3[8] __cacheline_aligned = { 1082 1.19 maxv [0] = { .emul = &x86_emul_test }, 1083 1.19 maxv [1] = { .emul = &x86_emul_test } 1084 1.11 maxv }; 1085 1.5 maxv 1086 1.27 maxv static const struct x86_group_entry group11[8] __cacheline_aligned = { 1087 1.19 maxv [0] = { .emul = &x86_emul_mov } 1088 1.5 maxv }; 1089 1.5 maxv 1090 1.27 maxv static const struct x86_opcode primary_opcode_table[256] __cacheline_aligned = { 1091 1.5 maxv /* 1092 1.11 maxv * Group1 1093 1.11 maxv */ 1094 1.27 maxv [0x80] = { 1095 1.19 maxv /* Eb, Ib */ 1096 1.27 maxv .valid = true, 1097 1.19 maxv .regmodrm = true, 1098 1.19 maxv .regtorm = true, 1099 1.19 maxv .szoverride = false, 1100 1.19 
maxv .defsize = OPSIZE_BYTE, 1101 1.19 maxv .group1 = true, 1102 1.19 maxv .immediate = true, 1103 1.19 maxv .emul = NULL /* group1 */ 1104 1.19 maxv }, 1105 1.27 maxv [0x81] = { 1106 1.15 maxv /* Ev, Iz */ 1107 1.27 maxv .valid = true, 1108 1.15 maxv .regmodrm = true, 1109 1.15 maxv .regtorm = true, 1110 1.15 maxv .szoverride = true, 1111 1.15 maxv .defsize = -1, 1112 1.15 maxv .group1 = true, 1113 1.15 maxv .immediate = true, 1114 1.15 maxv .flags = FLAG_immz, 1115 1.15 maxv .emul = NULL /* group1 */ 1116 1.15 maxv }, 1117 1.27 maxv [0x83] = { 1118 1.11 maxv /* Ev, Ib */ 1119 1.27 maxv .valid = true, 1120 1.11 maxv .regmodrm = true, 1121 1.11 maxv .regtorm = true, 1122 1.11 maxv .szoverride = true, 1123 1.11 maxv .defsize = -1, 1124 1.11 maxv .group1 = true, 1125 1.11 maxv .immediate = true, 1126 1.11 maxv .flags = FLAG_imm8, 1127 1.11 maxv .emul = NULL /* group1 */ 1128 1.11 maxv }, 1129 1.11 maxv 1130 1.11 maxv /* 1131 1.19 maxv * Group3 1132 1.19 maxv */ 1133 1.27 maxv [0xF6] = { 1134 1.19 maxv /* Eb, Ib */ 1135 1.27 maxv .valid = true, 1136 1.19 maxv .regmodrm = true, 1137 1.19 maxv .regtorm = true, 1138 1.19 maxv .szoverride = false, 1139 1.19 maxv .defsize = OPSIZE_BYTE, 1140 1.19 maxv .group3 = true, 1141 1.19 maxv .immediate = true, 1142 1.19 maxv .emul = NULL /* group3 */ 1143 1.19 maxv }, 1144 1.27 maxv [0xF7] = { 1145 1.19 maxv /* Ev, Iz */ 1146 1.27 maxv .valid = true, 1147 1.19 maxv .regmodrm = true, 1148 1.19 maxv .regtorm = true, 1149 1.19 maxv .szoverride = true, 1150 1.19 maxv .defsize = -1, 1151 1.19 maxv .group3 = true, 1152 1.19 maxv .immediate = true, 1153 1.19 maxv .flags = FLAG_immz, 1154 1.19 maxv .emul = NULL /* group3 */ 1155 1.19 maxv }, 1156 1.19 maxv 1157 1.19 maxv /* 1158 1.5 maxv * Group11 1159 1.5 maxv */ 1160 1.27 maxv [0xC6] = { 1161 1.11 maxv /* Eb, Ib */ 1162 1.27 maxv .valid = true, 1163 1.5 maxv .regmodrm = true, 1164 1.5 maxv .regtorm = true, 1165 1.5 maxv .szoverride = false, 1166 1.5 maxv .defsize = OPSIZE_BYTE, 1167 1.5 
maxv .group11 = true, 1168 1.5 maxv .immediate = true, 1169 1.5 maxv .emul = NULL /* group11 */ 1170 1.5 maxv }, 1171 1.27 maxv [0xC7] = { 1172 1.11 maxv /* Ev, Iz */ 1173 1.27 maxv .valid = true, 1174 1.5 maxv .regmodrm = true, 1175 1.5 maxv .regtorm = true, 1176 1.5 maxv .szoverride = true, 1177 1.5 maxv .defsize = -1, 1178 1.5 maxv .group11 = true, 1179 1.5 maxv .immediate = true, 1180 1.11 maxv .flags = FLAG_immz, 1181 1.5 maxv .emul = NULL /* group11 */ 1182 1.5 maxv }, 1183 1.5 maxv 1184 1.5 maxv /* 1185 1.5 maxv * OR 1186 1.5 maxv */ 1187 1.27 maxv [0x08] = { 1188 1.5 maxv /* Eb, Gb */ 1189 1.27 maxv .valid = true, 1190 1.5 maxv .regmodrm = true, 1191 1.5 maxv .regtorm = true, 1192 1.5 maxv .szoverride = false, 1193 1.5 maxv .defsize = OPSIZE_BYTE, 1194 1.19 maxv .emul = &x86_emul_or 1195 1.5 maxv }, 1196 1.27 maxv [0x09] = { 1197 1.5 maxv /* Ev, Gv */ 1198 1.27 maxv .valid = true, 1199 1.5 maxv .regmodrm = true, 1200 1.5 maxv .regtorm = true, 1201 1.5 maxv .szoverride = true, 1202 1.5 maxv .defsize = -1, 1203 1.19 maxv .emul = &x86_emul_or 1204 1.5 maxv }, 1205 1.27 maxv [0x0A] = { 1206 1.5 maxv /* Gb, Eb */ 1207 1.27 maxv .valid = true, 1208 1.5 maxv .regmodrm = true, 1209 1.5 maxv .regtorm = false, 1210 1.5 maxv .szoverride = false, 1211 1.5 maxv .defsize = OPSIZE_BYTE, 1212 1.19 maxv .emul = &x86_emul_or 1213 1.5 maxv }, 1214 1.27 maxv [0x0B] = { 1215 1.5 maxv /* Gv, Ev */ 1216 1.27 maxv .valid = true, 1217 1.5 maxv .regmodrm = true, 1218 1.5 maxv .regtorm = false, 1219 1.5 maxv .szoverride = true, 1220 1.5 maxv .defsize = -1, 1221 1.19 maxv .emul = &x86_emul_or 1222 1.5 maxv }, 1223 1.5 maxv 1224 1.5 maxv /* 1225 1.5 maxv * AND 1226 1.5 maxv */ 1227 1.27 maxv [0x20] = { 1228 1.5 maxv /* Eb, Gb */ 1229 1.27 maxv .valid = true, 1230 1.5 maxv .regmodrm = true, 1231 1.5 maxv .regtorm = true, 1232 1.5 maxv .szoverride = false, 1233 1.5 maxv .defsize = OPSIZE_BYTE, 1234 1.19 maxv .emul = &x86_emul_and 1235 1.5 maxv }, 1236 1.27 maxv [0x21] = { 1237 1.5 maxv 
/* Ev, Gv */ 1238 1.27 maxv .valid = true, 1239 1.5 maxv .regmodrm = true, 1240 1.5 maxv .regtorm = true, 1241 1.5 maxv .szoverride = true, 1242 1.5 maxv .defsize = -1, 1243 1.19 maxv .emul = &x86_emul_and 1244 1.5 maxv }, 1245 1.27 maxv [0x22] = { 1246 1.5 maxv /* Gb, Eb */ 1247 1.27 maxv .valid = true, 1248 1.5 maxv .regmodrm = true, 1249 1.5 maxv .regtorm = false, 1250 1.5 maxv .szoverride = false, 1251 1.5 maxv .defsize = OPSIZE_BYTE, 1252 1.19 maxv .emul = &x86_emul_and 1253 1.5 maxv }, 1254 1.27 maxv [0x23] = { 1255 1.5 maxv /* Gv, Ev */ 1256 1.27 maxv .valid = true, 1257 1.5 maxv .regmodrm = true, 1258 1.5 maxv .regtorm = false, 1259 1.5 maxv .szoverride = true, 1260 1.5 maxv .defsize = -1, 1261 1.19 maxv .emul = &x86_emul_and 1262 1.19 maxv }, 1263 1.19 maxv 1264 1.19 maxv /* 1265 1.19 maxv * SUB 1266 1.19 maxv */ 1267 1.27 maxv [0x28] = { 1268 1.19 maxv /* Eb, Gb */ 1269 1.27 maxv .valid = true, 1270 1.19 maxv .regmodrm = true, 1271 1.19 maxv .regtorm = true, 1272 1.19 maxv .szoverride = false, 1273 1.19 maxv .defsize = OPSIZE_BYTE, 1274 1.19 maxv .emul = &x86_emul_sub 1275 1.19 maxv }, 1276 1.27 maxv [0x29] = { 1277 1.19 maxv /* Ev, Gv */ 1278 1.27 maxv .valid = true, 1279 1.19 maxv .regmodrm = true, 1280 1.19 maxv .regtorm = true, 1281 1.19 maxv .szoverride = true, 1282 1.19 maxv .defsize = -1, 1283 1.19 maxv .emul = &x86_emul_sub 1284 1.19 maxv }, 1285 1.27 maxv [0x2A] = { 1286 1.19 maxv /* Gb, Eb */ 1287 1.27 maxv .valid = true, 1288 1.19 maxv .regmodrm = true, 1289 1.19 maxv .regtorm = false, 1290 1.19 maxv .szoverride = false, 1291 1.19 maxv .defsize = OPSIZE_BYTE, 1292 1.19 maxv .emul = &x86_emul_sub 1293 1.19 maxv }, 1294 1.27 maxv [0x2B] = { 1295 1.19 maxv /* Gv, Ev */ 1296 1.27 maxv .valid = true, 1297 1.19 maxv .regmodrm = true, 1298 1.19 maxv .regtorm = false, 1299 1.19 maxv .szoverride = true, 1300 1.19 maxv .defsize = -1, 1301 1.19 maxv .emul = &x86_emul_sub 1302 1.5 maxv }, 1303 1.5 maxv 1304 1.5 maxv /* 1305 1.5 maxv * XOR 1306 1.5 maxv */ 
1307 1.27 maxv [0x30] = { 1308 1.5 maxv /* Eb, Gb */ 1309 1.27 maxv .valid = true, 1310 1.5 maxv .regmodrm = true, 1311 1.5 maxv .regtorm = true, 1312 1.5 maxv .szoverride = false, 1313 1.5 maxv .defsize = OPSIZE_BYTE, 1314 1.19 maxv .emul = &x86_emul_xor 1315 1.5 maxv }, 1316 1.27 maxv [0x31] = { 1317 1.5 maxv /* Ev, Gv */ 1318 1.27 maxv .valid = true, 1319 1.5 maxv .regmodrm = true, 1320 1.5 maxv .regtorm = true, 1321 1.5 maxv .szoverride = true, 1322 1.5 maxv .defsize = -1, 1323 1.19 maxv .emul = &x86_emul_xor 1324 1.5 maxv }, 1325 1.27 maxv [0x32] = { 1326 1.5 maxv /* Gb, Eb */ 1327 1.27 maxv .valid = true, 1328 1.5 maxv .regmodrm = true, 1329 1.5 maxv .regtorm = false, 1330 1.5 maxv .szoverride = false, 1331 1.5 maxv .defsize = OPSIZE_BYTE, 1332 1.19 maxv .emul = &x86_emul_xor 1333 1.5 maxv }, 1334 1.27 maxv [0x33] = { 1335 1.5 maxv /* Gv, Ev */ 1336 1.27 maxv .valid = true, 1337 1.5 maxv .regmodrm = true, 1338 1.5 maxv .regtorm = false, 1339 1.5 maxv .szoverride = true, 1340 1.5 maxv .defsize = -1, 1341 1.19 maxv .emul = &x86_emul_xor 1342 1.5 maxv }, 1343 1.5 maxv 1344 1.5 maxv /* 1345 1.33 maxv * XCHG 1346 1.33 maxv */ 1347 1.33 maxv [0x86] = { 1348 1.33 maxv /* Eb, Gb */ 1349 1.33 maxv .valid = true, 1350 1.33 maxv .regmodrm = true, 1351 1.33 maxv .regtorm = true, 1352 1.33 maxv .szoverride = false, 1353 1.33 maxv .defsize = OPSIZE_BYTE, 1354 1.33 maxv .emul = &x86_emul_xchg 1355 1.33 maxv }, 1356 1.33 maxv [0x87] = { 1357 1.33 maxv /* Ev, Gv */ 1358 1.33 maxv .valid = true, 1359 1.33 maxv .regmodrm = true, 1360 1.33 maxv .regtorm = true, 1361 1.33 maxv .szoverride = true, 1362 1.33 maxv .defsize = -1, 1363 1.33 maxv .emul = &x86_emul_xchg 1364 1.33 maxv }, 1365 1.33 maxv 1366 1.33 maxv /* 1367 1.5 maxv * MOV 1368 1.5 maxv */ 1369 1.27 maxv [0x88] = { 1370 1.5 maxv /* Eb, Gb */ 1371 1.27 maxv .valid = true, 1372 1.5 maxv .regmodrm = true, 1373 1.5 maxv .regtorm = true, 1374 1.5 maxv .szoverride = false, 1375 1.5 maxv .defsize = OPSIZE_BYTE, 1376 1.19 maxv 
.emul = &x86_emul_mov 1377 1.5 maxv }, 1378 1.27 maxv [0x89] = { 1379 1.5 maxv /* Ev, Gv */ 1380 1.27 maxv .valid = true, 1381 1.5 maxv .regmodrm = true, 1382 1.5 maxv .regtorm = true, 1383 1.5 maxv .szoverride = true, 1384 1.5 maxv .defsize = -1, 1385 1.19 maxv .emul = &x86_emul_mov 1386 1.5 maxv }, 1387 1.27 maxv [0x8A] = { 1388 1.5 maxv /* Gb, Eb */ 1389 1.27 maxv .valid = true, 1390 1.5 maxv .regmodrm = true, 1391 1.5 maxv .regtorm = false, 1392 1.5 maxv .szoverride = false, 1393 1.5 maxv .defsize = OPSIZE_BYTE, 1394 1.19 maxv .emul = &x86_emul_mov 1395 1.5 maxv }, 1396 1.27 maxv [0x8B] = { 1397 1.5 maxv /* Gv, Ev */ 1398 1.27 maxv .valid = true, 1399 1.5 maxv .regmodrm = true, 1400 1.5 maxv .regtorm = false, 1401 1.5 maxv .szoverride = true, 1402 1.5 maxv .defsize = -1, 1403 1.19 maxv .emul = &x86_emul_mov 1404 1.5 maxv }, 1405 1.27 maxv [0xA0] = { 1406 1.5 maxv /* AL, Ob */ 1407 1.27 maxv .valid = true, 1408 1.5 maxv .dmo = true, 1409 1.5 maxv .todmo = false, 1410 1.5 maxv .szoverride = false, 1411 1.5 maxv .defsize = OPSIZE_BYTE, 1412 1.19 maxv .emul = &x86_emul_mov 1413 1.5 maxv }, 1414 1.27 maxv [0xA1] = { 1415 1.5 maxv /* rAX, Ov */ 1416 1.27 maxv .valid = true, 1417 1.5 maxv .dmo = true, 1418 1.5 maxv .todmo = false, 1419 1.5 maxv .szoverride = true, 1420 1.5 maxv .defsize = -1, 1421 1.19 maxv .emul = &x86_emul_mov 1422 1.5 maxv }, 1423 1.27 maxv [0xA2] = { 1424 1.5 maxv /* Ob, AL */ 1425 1.27 maxv .valid = true, 1426 1.5 maxv .dmo = true, 1427 1.5 maxv .todmo = true, 1428 1.5 maxv .szoverride = false, 1429 1.5 maxv .defsize = OPSIZE_BYTE, 1430 1.19 maxv .emul = &x86_emul_mov 1431 1.5 maxv }, 1432 1.27 maxv [0xA3] = { 1433 1.5 maxv /* Ov, rAX */ 1434 1.27 maxv .valid = true, 1435 1.5 maxv .dmo = true, 1436 1.5 maxv .todmo = true, 1437 1.5 maxv .szoverride = true, 1438 1.5 maxv .defsize = -1, 1439 1.19 maxv .emul = &x86_emul_mov 1440 1.5 maxv }, 1441 1.5 maxv 1442 1.5 maxv /* 1443 1.6 maxv * MOVS 1444 1.6 maxv */ 1445 1.27 maxv [0xA4] = { 1446 1.6 maxv /* 
Yb, Xb */ 1447 1.27 maxv .valid = true, 1448 1.6 maxv .movs = true, 1449 1.6 maxv .szoverride = false, 1450 1.6 maxv .defsize = OPSIZE_BYTE, 1451 1.43 reinoud .emul = NULL 1452 1.6 maxv }, 1453 1.27 maxv [0xA5] = { 1454 1.6 maxv /* Yv, Xv */ 1455 1.27 maxv .valid = true, 1456 1.6 maxv .movs = true, 1457 1.6 maxv .szoverride = true, 1458 1.6 maxv .defsize = -1, 1459 1.43 reinoud .emul = NULL 1460 1.43 reinoud }, 1461 1.43 reinoud 1462 1.43 reinoud /* 1463 1.43 reinoud * CMPS 1464 1.43 reinoud */ 1465 1.43 reinoud [0xA6] = { 1466 1.43 reinoud /* Yb, Xb */ 1467 1.43 reinoud .valid = true, 1468 1.43 reinoud .cmps = true, 1469 1.43 reinoud .szoverride = false, 1470 1.43 reinoud .defsize = OPSIZE_BYTE, 1471 1.43 reinoud .emul = NULL 1472 1.43 reinoud }, 1473 1.43 reinoud [0xA7] = { 1474 1.43 reinoud /* Yv, Xv */ 1475 1.43 reinoud .valid = true, 1476 1.43 reinoud .cmps = true, 1477 1.43 reinoud .szoverride = true, 1478 1.43 reinoud .defsize = -1, 1479 1.43 reinoud .emul = NULL 1480 1.6 maxv }, 1481 1.6 maxv 1482 1.6 maxv /* 1483 1.5 maxv * STOS 1484 1.5 maxv */ 1485 1.27 maxv [0xAA] = { 1486 1.5 maxv /* Yb, AL */ 1487 1.27 maxv .valid = true, 1488 1.5 maxv .stos = true, 1489 1.5 maxv .szoverride = false, 1490 1.5 maxv .defsize = OPSIZE_BYTE, 1491 1.19 maxv .emul = &x86_emul_stos 1492 1.5 maxv }, 1493 1.27 maxv [0xAB] = { 1494 1.5 maxv /* Yv, rAX */ 1495 1.27 maxv .valid = true, 1496 1.5 maxv .stos = true, 1497 1.5 maxv .szoverride = true, 1498 1.5 maxv .defsize = -1, 1499 1.19 maxv .emul = &x86_emul_stos 1500 1.5 maxv }, 1501 1.5 maxv 1502 1.5 maxv /* 1503 1.5 maxv * LODS 1504 1.5 maxv */ 1505 1.27 maxv [0xAC] = { 1506 1.5 maxv /* AL, Xb */ 1507 1.27 maxv .valid = true, 1508 1.5 maxv .lods = true, 1509 1.5 maxv .szoverride = false, 1510 1.5 maxv .defsize = OPSIZE_BYTE, 1511 1.19 maxv .emul = &x86_emul_lods 1512 1.5 maxv }, 1513 1.27 maxv [0xAD] = { 1514 1.5 maxv /* rAX, Xv */ 1515 1.27 maxv .valid = true, 1516 1.5 maxv .lods = true, 1517 1.5 maxv .szoverride = true, 1518 
1.5 maxv .defsize = -1, 1519 1.19 maxv .emul = &x86_emul_lods 1520 1.5 maxv }, 1521 1.5 maxv }; 1522 1.5 maxv 1523 1.27 maxv static const struct x86_opcode secondary_opcode_table[256] __cacheline_aligned = { 1524 1.10 maxv /* 1525 1.10 maxv * MOVZX 1526 1.10 maxv */ 1527 1.27 maxv [0xB6] = { 1528 1.10 maxv /* Gv, Eb */ 1529 1.27 maxv .valid = true, 1530 1.10 maxv .regmodrm = true, 1531 1.10 maxv .regtorm = false, 1532 1.10 maxv .szoverride = true, 1533 1.10 maxv .defsize = OPSIZE_BYTE, 1534 1.11 maxv .flags = FLAG_ze, 1535 1.19 maxv .emul = &x86_emul_mov 1536 1.10 maxv }, 1537 1.27 maxv [0xB7] = { 1538 1.10 maxv /* Gv, Ew */ 1539 1.27 maxv .valid = true, 1540 1.10 maxv .regmodrm = true, 1541 1.10 maxv .regtorm = false, 1542 1.10 maxv .szoverride = true, 1543 1.10 maxv .defsize = OPSIZE_WORD, 1544 1.11 maxv .flags = FLAG_ze, 1545 1.19 maxv .emul = &x86_emul_mov 1546 1.10 maxv }, 1547 1.10 maxv }; 1548 1.10 maxv 1549 1.5 maxv static const struct x86_reg gpr_map__rip = { NVMM_X64_GPR_RIP, 0xFFFFFFFFFFFFFFFF }; 1550 1.5 maxv 1551 1.5 maxv /* [REX-present][enc][opsize] */ 1552 1.27 maxv static const struct x86_reg gpr_map__special[2][4][8] __cacheline_aligned = { 1553 1.5 maxv [false] = { 1554 1.5 maxv /* No REX prefix. 
*/ 1555 1.5 maxv [0b00] = { 1556 1.5 maxv [0] = { NVMM_X64_GPR_RAX, 0x000000000000FF00 }, /* AH */ 1557 1.5 maxv [1] = { NVMM_X64_GPR_RSP, 0x000000000000FFFF }, /* SP */ 1558 1.5 maxv [2] = { -1, 0 }, 1559 1.5 maxv [3] = { NVMM_X64_GPR_RSP, 0x00000000FFFFFFFF }, /* ESP */ 1560 1.5 maxv [4] = { -1, 0 }, 1561 1.5 maxv [5] = { -1, 0 }, 1562 1.5 maxv [6] = { -1, 0 }, 1563 1.5 maxv [7] = { -1, 0 }, 1564 1.5 maxv }, 1565 1.5 maxv [0b01] = { 1566 1.5 maxv [0] = { NVMM_X64_GPR_RCX, 0x000000000000FF00 }, /* CH */ 1567 1.5 maxv [1] = { NVMM_X64_GPR_RBP, 0x000000000000FFFF }, /* BP */ 1568 1.5 maxv [2] = { -1, 0 }, 1569 1.5 maxv [3] = { NVMM_X64_GPR_RBP, 0x00000000FFFFFFFF }, /* EBP */ 1570 1.5 maxv [4] = { -1, 0 }, 1571 1.5 maxv [5] = { -1, 0 }, 1572 1.5 maxv [6] = { -1, 0 }, 1573 1.5 maxv [7] = { -1, 0 }, 1574 1.5 maxv }, 1575 1.5 maxv [0b10] = { 1576 1.5 maxv [0] = { NVMM_X64_GPR_RDX, 0x000000000000FF00 }, /* DH */ 1577 1.5 maxv [1] = { NVMM_X64_GPR_RSI, 0x000000000000FFFF }, /* SI */ 1578 1.5 maxv [2] = { -1, 0 }, 1579 1.5 maxv [3] = { NVMM_X64_GPR_RSI, 0x00000000FFFFFFFF }, /* ESI */ 1580 1.5 maxv [4] = { -1, 0 }, 1581 1.5 maxv [5] = { -1, 0 }, 1582 1.5 maxv [6] = { -1, 0 }, 1583 1.5 maxv [7] = { -1, 0 }, 1584 1.5 maxv }, 1585 1.5 maxv [0b11] = { 1586 1.5 maxv [0] = { NVMM_X64_GPR_RBX, 0x000000000000FF00 }, /* BH */ 1587 1.5 maxv [1] = { NVMM_X64_GPR_RDI, 0x000000000000FFFF }, /* DI */ 1588 1.5 maxv [2] = { -1, 0 }, 1589 1.5 maxv [3] = { NVMM_X64_GPR_RDI, 0x00000000FFFFFFFF }, /* EDI */ 1590 1.5 maxv [4] = { -1, 0 }, 1591 1.5 maxv [5] = { -1, 0 }, 1592 1.5 maxv [6] = { -1, 0 }, 1593 1.5 maxv [7] = { -1, 0 }, 1594 1.5 maxv } 1595 1.5 maxv }, 1596 1.5 maxv [true] = { 1597 1.5 maxv /* Has REX prefix. 
*/ 1598 1.5 maxv [0b00] = { 1599 1.5 maxv [0] = { NVMM_X64_GPR_RSP, 0x00000000000000FF }, /* SPL */ 1600 1.5 maxv [1] = { NVMM_X64_GPR_RSP, 0x000000000000FFFF }, /* SP */ 1601 1.5 maxv [2] = { -1, 0 }, 1602 1.5 maxv [3] = { NVMM_X64_GPR_RSP, 0x00000000FFFFFFFF }, /* ESP */ 1603 1.5 maxv [4] = { -1, 0 }, 1604 1.5 maxv [5] = { -1, 0 }, 1605 1.5 maxv [6] = { -1, 0 }, 1606 1.5 maxv [7] = { NVMM_X64_GPR_RSP, 0xFFFFFFFFFFFFFFFF }, /* RSP */ 1607 1.5 maxv }, 1608 1.5 maxv [0b01] = { 1609 1.5 maxv [0] = { NVMM_X64_GPR_RBP, 0x00000000000000FF }, /* BPL */ 1610 1.5 maxv [1] = { NVMM_X64_GPR_RBP, 0x000000000000FFFF }, /* BP */ 1611 1.5 maxv [2] = { -1, 0 }, 1612 1.5 maxv [3] = { NVMM_X64_GPR_RBP, 0x00000000FFFFFFFF }, /* EBP */ 1613 1.5 maxv [4] = { -1, 0 }, 1614 1.5 maxv [5] = { -1, 0 }, 1615 1.5 maxv [6] = { -1, 0 }, 1616 1.5 maxv [7] = { NVMM_X64_GPR_RBP, 0xFFFFFFFFFFFFFFFF }, /* RBP */ 1617 1.5 maxv }, 1618 1.5 maxv [0b10] = { 1619 1.5 maxv [0] = { NVMM_X64_GPR_RSI, 0x00000000000000FF }, /* SIL */ 1620 1.5 maxv [1] = { NVMM_X64_GPR_RSI, 0x000000000000FFFF }, /* SI */ 1621 1.5 maxv [2] = { -1, 0 }, 1622 1.5 maxv [3] = { NVMM_X64_GPR_RSI, 0x00000000FFFFFFFF }, /* ESI */ 1623 1.5 maxv [4] = { -1, 0 }, 1624 1.5 maxv [5] = { -1, 0 }, 1625 1.5 maxv [6] = { -1, 0 }, 1626 1.5 maxv [7] = { NVMM_X64_GPR_RSI, 0xFFFFFFFFFFFFFFFF }, /* RSI */ 1627 1.5 maxv }, 1628 1.5 maxv [0b11] = { 1629 1.5 maxv [0] = { NVMM_X64_GPR_RDI, 0x00000000000000FF }, /* DIL */ 1630 1.5 maxv [1] = { NVMM_X64_GPR_RDI, 0x000000000000FFFF }, /* DI */ 1631 1.5 maxv [2] = { -1, 0 }, 1632 1.5 maxv [3] = { NVMM_X64_GPR_RDI, 0x00000000FFFFFFFF }, /* EDI */ 1633 1.5 maxv [4] = { -1, 0 }, 1634 1.5 maxv [5] = { -1, 0 }, 1635 1.5 maxv [6] = { -1, 0 }, 1636 1.5 maxv [7] = { NVMM_X64_GPR_RDI, 0xFFFFFFFFFFFFFFFF }, /* RDI */ 1637 1.5 maxv } 1638 1.5 maxv } 1639 1.5 maxv }; 1640 1.5 maxv 1641 1.5 maxv /* [depends][enc][size] */ 1642 1.27 maxv static const struct x86_reg gpr_map[2][8][8] __cacheline_aligned = { 1643 1.5 maxv 
[false] = { 1644 1.5 maxv /* Not extended. */ 1645 1.5 maxv [0b000] = { 1646 1.5 maxv [0] = { NVMM_X64_GPR_RAX, 0x00000000000000FF }, /* AL */ 1647 1.5 maxv [1] = { NVMM_X64_GPR_RAX, 0x000000000000FFFF }, /* AX */ 1648 1.5 maxv [2] = { -1, 0 }, 1649 1.5 maxv [3] = { NVMM_X64_GPR_RAX, 0x00000000FFFFFFFF }, /* EAX */ 1650 1.5 maxv [4] = { -1, 0 }, 1651 1.5 maxv [5] = { -1, 0 }, 1652 1.5 maxv [6] = { -1, 0 }, 1653 1.18 maxv [7] = { NVMM_X64_GPR_RAX, 0xFFFFFFFFFFFFFFFF }, /* RAX */ 1654 1.5 maxv }, 1655 1.5 maxv [0b001] = { 1656 1.5 maxv [0] = { NVMM_X64_GPR_RCX, 0x00000000000000FF }, /* CL */ 1657 1.5 maxv [1] = { NVMM_X64_GPR_RCX, 0x000000000000FFFF }, /* CX */ 1658 1.5 maxv [2] = { -1, 0 }, 1659 1.5 maxv [3] = { NVMM_X64_GPR_RCX, 0x00000000FFFFFFFF }, /* ECX */ 1660 1.5 maxv [4] = { -1, 0 }, 1661 1.5 maxv [5] = { -1, 0 }, 1662 1.5 maxv [6] = { -1, 0 }, 1663 1.18 maxv [7] = { NVMM_X64_GPR_RCX, 0xFFFFFFFFFFFFFFFF }, /* RCX */ 1664 1.5 maxv }, 1665 1.5 maxv [0b010] = { 1666 1.5 maxv [0] = { NVMM_X64_GPR_RDX, 0x00000000000000FF }, /* DL */ 1667 1.5 maxv [1] = { NVMM_X64_GPR_RDX, 0x000000000000FFFF }, /* DX */ 1668 1.5 maxv [2] = { -1, 0 }, 1669 1.5 maxv [3] = { NVMM_X64_GPR_RDX, 0x00000000FFFFFFFF }, /* EDX */ 1670 1.5 maxv [4] = { -1, 0 }, 1671 1.5 maxv [5] = { -1, 0 }, 1672 1.5 maxv [6] = { -1, 0 }, 1673 1.18 maxv [7] = { NVMM_X64_GPR_RDX, 0xFFFFFFFFFFFFFFFF }, /* RDX */ 1674 1.5 maxv }, 1675 1.5 maxv [0b011] = { 1676 1.5 maxv [0] = { NVMM_X64_GPR_RBX, 0x00000000000000FF }, /* BL */ 1677 1.5 maxv [1] = { NVMM_X64_GPR_RBX, 0x000000000000FFFF }, /* BX */ 1678 1.5 maxv [2] = { -1, 0 }, 1679 1.5 maxv [3] = { NVMM_X64_GPR_RBX, 0x00000000FFFFFFFF }, /* EBX */ 1680 1.5 maxv [4] = { -1, 0 }, 1681 1.5 maxv [5] = { -1, 0 }, 1682 1.5 maxv [6] = { -1, 0 }, 1683 1.18 maxv [7] = { NVMM_X64_GPR_RBX, 0xFFFFFFFFFFFFFFFF }, /* RBX */ 1684 1.5 maxv }, 1685 1.5 maxv [0b100] = { 1686 1.5 maxv [0] = { -1, 0 }, /* SPECIAL */ 1687 1.5 maxv [1] = { -1, 0 }, /* SPECIAL */ 1688 1.5 maxv [2] = { 
-1, 0 }, 1689 1.5 maxv [3] = { -1, 0 }, /* SPECIAL */ 1690 1.5 maxv [4] = { -1, 0 }, 1691 1.5 maxv [5] = { -1, 0 }, 1692 1.5 maxv [6] = { -1, 0 }, 1693 1.5 maxv [7] = { -1, 0 }, /* SPECIAL */ 1694 1.5 maxv }, 1695 1.5 maxv [0b101] = { 1696 1.5 maxv [0] = { -1, 0 }, /* SPECIAL */ 1697 1.5 maxv [1] = { -1, 0 }, /* SPECIAL */ 1698 1.5 maxv [2] = { -1, 0 }, 1699 1.5 maxv [3] = { -1, 0 }, /* SPECIAL */ 1700 1.5 maxv [4] = { -1, 0 }, 1701 1.5 maxv [5] = { -1, 0 }, 1702 1.5 maxv [6] = { -1, 0 }, 1703 1.5 maxv [7] = { -1, 0 }, /* SPECIAL */ 1704 1.5 maxv }, 1705 1.5 maxv [0b110] = { 1706 1.5 maxv [0] = { -1, 0 }, /* SPECIAL */ 1707 1.5 maxv [1] = { -1, 0 }, /* SPECIAL */ 1708 1.5 maxv [2] = { -1, 0 }, 1709 1.5 maxv [3] = { -1, 0 }, /* SPECIAL */ 1710 1.5 maxv [4] = { -1, 0 }, 1711 1.5 maxv [5] = { -1, 0 }, 1712 1.5 maxv [6] = { -1, 0 }, 1713 1.5 maxv [7] = { -1, 0 }, /* SPECIAL */ 1714 1.5 maxv }, 1715 1.5 maxv [0b111] = { 1716 1.5 maxv [0] = { -1, 0 }, /* SPECIAL */ 1717 1.5 maxv [1] = { -1, 0 }, /* SPECIAL */ 1718 1.5 maxv [2] = { -1, 0 }, 1719 1.5 maxv [3] = { -1, 0 }, /* SPECIAL */ 1720 1.5 maxv [4] = { -1, 0 }, 1721 1.5 maxv [5] = { -1, 0 }, 1722 1.5 maxv [6] = { -1, 0 }, 1723 1.5 maxv [7] = { -1, 0 }, /* SPECIAL */ 1724 1.5 maxv }, 1725 1.5 maxv }, 1726 1.5 maxv [true] = { 1727 1.5 maxv /* Extended. 
*/ 1728 1.5 maxv [0b000] = { 1729 1.5 maxv [0] = { NVMM_X64_GPR_R8, 0x00000000000000FF }, /* R8B */ 1730 1.5 maxv [1] = { NVMM_X64_GPR_R8, 0x000000000000FFFF }, /* R8W */ 1731 1.5 maxv [2] = { -1, 0 }, 1732 1.5 maxv [3] = { NVMM_X64_GPR_R8, 0x00000000FFFFFFFF }, /* R8D */ 1733 1.5 maxv [4] = { -1, 0 }, 1734 1.5 maxv [5] = { -1, 0 }, 1735 1.5 maxv [6] = { -1, 0 }, 1736 1.18 maxv [7] = { NVMM_X64_GPR_R8, 0xFFFFFFFFFFFFFFFF }, /* R8 */ 1737 1.5 maxv }, 1738 1.5 maxv [0b001] = { 1739 1.5 maxv [0] = { NVMM_X64_GPR_R9, 0x00000000000000FF }, /* R9B */ 1740 1.5 maxv [1] = { NVMM_X64_GPR_R9, 0x000000000000FFFF }, /* R9W */ 1741 1.5 maxv [2] = { -1, 0 }, 1742 1.5 maxv [3] = { NVMM_X64_GPR_R9, 0x00000000FFFFFFFF }, /* R9D */ 1743 1.5 maxv [4] = { -1, 0 }, 1744 1.5 maxv [5] = { -1, 0 }, 1745 1.5 maxv [6] = { -1, 0 }, 1746 1.18 maxv [7] = { NVMM_X64_GPR_R9, 0xFFFFFFFFFFFFFFFF }, /* R9 */ 1747 1.5 maxv }, 1748 1.5 maxv [0b010] = { 1749 1.5 maxv [0] = { NVMM_X64_GPR_R10, 0x00000000000000FF }, /* R10B */ 1750 1.5 maxv [1] = { NVMM_X64_GPR_R10, 0x000000000000FFFF }, /* R10W */ 1751 1.5 maxv [2] = { -1, 0 }, 1752 1.5 maxv [3] = { NVMM_X64_GPR_R10, 0x00000000FFFFFFFF }, /* R10D */ 1753 1.5 maxv [4] = { -1, 0 }, 1754 1.5 maxv [5] = { -1, 0 }, 1755 1.5 maxv [6] = { -1, 0 }, 1756 1.18 maxv [7] = { NVMM_X64_GPR_R10, 0xFFFFFFFFFFFFFFFF }, /* R10 */ 1757 1.5 maxv }, 1758 1.5 maxv [0b011] = { 1759 1.5 maxv [0] = { NVMM_X64_GPR_R11, 0x00000000000000FF }, /* R11B */ 1760 1.5 maxv [1] = { NVMM_X64_GPR_R11, 0x000000000000FFFF }, /* R11W */ 1761 1.5 maxv [2] = { -1, 0 }, 1762 1.5 maxv [3] = { NVMM_X64_GPR_R11, 0x00000000FFFFFFFF }, /* R11D */ 1763 1.5 maxv [4] = { -1, 0 }, 1764 1.5 maxv [5] = { -1, 0 }, 1765 1.5 maxv [6] = { -1, 0 }, 1766 1.18 maxv [7] = { NVMM_X64_GPR_R11, 0xFFFFFFFFFFFFFFFF }, /* R11 */ 1767 1.5 maxv }, 1768 1.5 maxv [0b100] = { 1769 1.5 maxv [0] = { NVMM_X64_GPR_R12, 0x00000000000000FF }, /* R12B */ 1770 1.5 maxv [1] = { NVMM_X64_GPR_R12, 0x000000000000FFFF }, /* R12W */ 1771 
1.5 maxv [2] = { -1, 0 }, 1772 1.5 maxv [3] = { NVMM_X64_GPR_R12, 0x00000000FFFFFFFF }, /* R12D */ 1773 1.5 maxv [4] = { -1, 0 }, 1774 1.5 maxv [5] = { -1, 0 }, 1775 1.5 maxv [6] = { -1, 0 }, 1776 1.18 maxv [7] = { NVMM_X64_GPR_R12, 0xFFFFFFFFFFFFFFFF }, /* R12 */ 1777 1.5 maxv }, 1778 1.5 maxv [0b101] = { 1779 1.5 maxv [0] = { NVMM_X64_GPR_R13, 0x00000000000000FF }, /* R13B */ 1780 1.5 maxv [1] = { NVMM_X64_GPR_R13, 0x000000000000FFFF }, /* R13W */ 1781 1.5 maxv [2] = { -1, 0 }, 1782 1.5 maxv [3] = { NVMM_X64_GPR_R13, 0x00000000FFFFFFFF }, /* R13D */ 1783 1.5 maxv [4] = { -1, 0 }, 1784 1.5 maxv [5] = { -1, 0 }, 1785 1.5 maxv [6] = { -1, 0 }, 1786 1.18 maxv [7] = { NVMM_X64_GPR_R13, 0xFFFFFFFFFFFFFFFF }, /* R13 */ 1787 1.5 maxv }, 1788 1.5 maxv [0b110] = { 1789 1.5 maxv [0] = { NVMM_X64_GPR_R14, 0x00000000000000FF }, /* R14B */ 1790 1.5 maxv [1] = { NVMM_X64_GPR_R14, 0x000000000000FFFF }, /* R14W */ 1791 1.5 maxv [2] = { -1, 0 }, 1792 1.5 maxv [3] = { NVMM_X64_GPR_R14, 0x00000000FFFFFFFF }, /* R14D */ 1793 1.5 maxv [4] = { -1, 0 }, 1794 1.5 maxv [5] = { -1, 0 }, 1795 1.5 maxv [6] = { -1, 0 }, 1796 1.18 maxv [7] = { NVMM_X64_GPR_R14, 0xFFFFFFFFFFFFFFFF }, /* R14 */ 1797 1.5 maxv }, 1798 1.5 maxv [0b111] = { 1799 1.5 maxv [0] = { NVMM_X64_GPR_R15, 0x00000000000000FF }, /* R15B */ 1800 1.5 maxv [1] = { NVMM_X64_GPR_R15, 0x000000000000FFFF }, /* R15W */ 1801 1.5 maxv [2] = { -1, 0 }, 1802 1.5 maxv [3] = { NVMM_X64_GPR_R15, 0x00000000FFFFFFFF }, /* R15D */ 1803 1.5 maxv [4] = { -1, 0 }, 1804 1.5 maxv [5] = { -1, 0 }, 1805 1.5 maxv [6] = { -1, 0 }, 1806 1.18 maxv [7] = { NVMM_X64_GPR_R15, 0xFFFFFFFFFFFFFFFF }, /* R15 */ 1807 1.5 maxv }, 1808 1.5 maxv } 1809 1.5 maxv }; 1810 1.5 maxv 1811 1.32 maxv /* [enc] */ 1812 1.32 maxv static const int gpr_dual_reg1_rm[8] __cacheline_aligned = { 1813 1.32 maxv [0b000] = NVMM_X64_GPR_RBX, /* BX (+SI) */ 1814 1.32 maxv [0b001] = NVMM_X64_GPR_RBX, /* BX (+DI) */ 1815 1.32 maxv [0b010] = NVMM_X64_GPR_RBP, /* BP (+SI) */ 1816 1.32 maxv 
[0b011] = NVMM_X64_GPR_RBP, /* BP (+DI) */ 1817 1.32 maxv [0b100] = NVMM_X64_GPR_RSI, /* SI */ 1818 1.32 maxv [0b101] = NVMM_X64_GPR_RDI, /* DI */ 1819 1.32 maxv [0b110] = NVMM_X64_GPR_RBP, /* BP */ 1820 1.32 maxv [0b111] = NVMM_X64_GPR_RBX, /* BX */ 1821 1.32 maxv }; 1822 1.32 maxv 1823 1.5 maxv static int 1824 1.5 maxv node_overflow(struct x86_decode_fsm *fsm, struct x86_instr *instr) 1825 1.5 maxv { 1826 1.5 maxv fsm->fn = NULL; 1827 1.5 maxv return -1; 1828 1.5 maxv } 1829 1.5 maxv 1830 1.5 maxv static int 1831 1.5 maxv fsm_read(struct x86_decode_fsm *fsm, uint8_t *bytes, size_t n) 1832 1.5 maxv { 1833 1.5 maxv if (fsm->buf + n > fsm->end) { 1834 1.5 maxv return -1; 1835 1.5 maxv } 1836 1.5 maxv memcpy(bytes, fsm->buf, n); 1837 1.5 maxv return 0; 1838 1.5 maxv } 1839 1.5 maxv 1840 1.27 maxv static inline void 1841 1.5 maxv fsm_advance(struct x86_decode_fsm *fsm, size_t n, 1842 1.5 maxv int (*fn)(struct x86_decode_fsm *, struct x86_instr *)) 1843 1.5 maxv { 1844 1.5 maxv fsm->buf += n; 1845 1.5 maxv if (fsm->buf > fsm->end) { 1846 1.5 maxv fsm->fn = node_overflow; 1847 1.5 maxv } else { 1848 1.5 maxv fsm->fn = fn; 1849 1.5 maxv } 1850 1.5 maxv } 1851 1.5 maxv 1852 1.5 maxv static const struct x86_reg * 1853 1.5 maxv resolve_special_register(struct x86_instr *instr, uint8_t enc, size_t regsize) 1854 1.5 maxv { 1855 1.5 maxv enc &= 0b11; 1856 1.5 maxv if (regsize == 8) { 1857 1.5 maxv /* May be 64bit without REX */ 1858 1.5 maxv return &gpr_map__special[1][enc][regsize-1]; 1859 1.5 maxv } 1860 1.5 maxv return &gpr_map__special[instr->rexpref.present][enc][regsize-1]; 1861 1.5 maxv } 1862 1.5 maxv 1863 1.5 maxv /* 1864 1.6 maxv * Special node, for MOVS. Fake two displacements of zero on the source and 1865 1.6 maxv * destination registers. 
1866 1.6 maxv */ 1867 1.6 maxv static int 1868 1.6 maxv node_movs(struct x86_decode_fsm *fsm, struct x86_instr *instr) 1869 1.6 maxv { 1870 1.6 maxv size_t adrsize; 1871 1.6 maxv 1872 1.6 maxv adrsize = instr->address_size; 1873 1.6 maxv 1874 1.6 maxv /* DS:RSI */ 1875 1.6 maxv instr->src.type = STORE_REG; 1876 1.6 maxv instr->src.u.reg = &gpr_map__special[1][2][adrsize-1]; 1877 1.6 maxv instr->src.disp.type = DISP_0; 1878 1.6 maxv 1879 1.6 maxv /* ES:RDI, force ES */ 1880 1.6 maxv instr->dst.type = STORE_REG; 1881 1.6 maxv instr->dst.u.reg = &gpr_map__special[1][3][adrsize-1]; 1882 1.6 maxv instr->dst.disp.type = DISP_0; 1883 1.6 maxv instr->dst.hardseg = NVMM_X64_SEG_ES; 1884 1.6 maxv 1885 1.6 maxv fsm_advance(fsm, 0, NULL); 1886 1.6 maxv 1887 1.6 maxv return 0; 1888 1.6 maxv } 1889 1.6 maxv 1890 1.6 maxv /* 1891 1.43 reinoud * Special node, for CMPS. Fake two displacements of zero on the source and 1892 1.43 reinoud * destination registers. 1893 1.43 reinoud * XXX coded as clone of movs as its similar in register usage 1894 1.43 reinoud * XXX might be merged with node_movs() 1895 1.43 reinoud */ 1896 1.43 reinoud static int 1897 1.43 reinoud node_cmps(struct x86_decode_fsm *fsm, struct x86_instr *instr) 1898 1.43 reinoud { 1899 1.43 reinoud size_t adrsize; 1900 1.43 reinoud 1901 1.43 reinoud adrsize = instr->address_size; 1902 1.43 reinoud 1903 1.43 reinoud /* DS:RSI */ 1904 1.43 reinoud instr->src.type = STORE_REG; 1905 1.43 reinoud instr->src.u.reg = &gpr_map__special[1][2][adrsize-1]; 1906 1.43 reinoud instr->src.disp.type = DISP_0; 1907 1.43 reinoud 1908 1.43 reinoud /* ES:RDI, force ES */ 1909 1.43 reinoud instr->dst.type = STORE_REG; 1910 1.43 reinoud instr->dst.u.reg = &gpr_map__special[1][3][adrsize-1]; 1911 1.43 reinoud instr->dst.disp.type = DISP_0; 1912 1.43 reinoud instr->dst.hardseg = NVMM_X64_SEG_ES; 1913 1.43 reinoud 1914 1.43 reinoud fsm_advance(fsm, 0, NULL); 1915 1.43 reinoud 1916 1.43 reinoud return 0; 1917 1.43 reinoud } 1918 1.43 reinoud 
/*
 * Special node, for STOS and LODS. Fake a displacement of zero on the
 * destination register. One operand is always ?AX; the other is the
 * implicit string pointer (ES:RDI for STOS, DS:RSI for LODS).
 */
static int
node_stlo(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode = instr->opcode;
	struct x86_store *stlo, *streg;
	size_t adrsize, regsize;

	adrsize = instr->address_size;
	regsize = instr->operand_size;

	/* STOS writes memory from the register; LODS is the converse. */
	if (opcode->stos) {
		streg = &instr->src;
		stlo = &instr->dst;
	} else {
		streg = &instr->dst;
		stlo = &instr->src;
	}

	streg->type = STORE_REG;
	streg->u.reg = &gpr_map[0][0][regsize-1]; /* ?AX */

	stlo->type = STORE_REG;
	if (opcode->stos) {
		/* ES:RDI, force ES */
		stlo->u.reg = &gpr_map__special[1][3][adrsize-1];
		stlo->hardseg = NVMM_X64_SEG_ES;
	} else {
		/* DS:RSI */
		stlo->u.reg = &gpr_map__special[1][2][adrsize-1];
	}
	stlo->disp.type = DISP_0;

	/* No further bytes to consume: decoding is complete. */
	fsm_advance(fsm, 0, NULL);

	return 0;
}

/*
 * Direct-Memory Offset node: one operand is ?AX, the other is an absolute
 * memory offset of address-size bytes read directly from the instruction
 * stream into stdmo->u.dmo.
 */
static int
node_dmo(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode = instr->opcode;
	struct x86_store *stdmo, *streg;
	size_t adrsize, regsize;

	adrsize = instr->address_size;
	regsize = instr->operand_size;

	/* 'todmo' means the register is the source, memory the destination. */
	if (opcode->todmo) {
		streg = &instr->src;
		stdmo = &instr->dst;
	} else {
		streg = &instr->dst;
		stdmo = &instr->src;
	}

	streg->type = STORE_REG;
	streg->u.reg = &gpr_map[0][0][regsize-1]; /* ?AX */

	stdmo->type = STORE_DMO;
	/* The offset itself is encoded inline, adrsize bytes long. */
	if (fsm_read(fsm, (uint8_t *)&stdmo->u.dmo, adrsize) == -1) {
		return -1;
	}
	fsm_advance(fsm, adrsize, NULL);

	return 0;
}

/*
 * Sign-extend 'val' from 'size' bytes to 64 bits. Sizes other than 1, 2
 * and 4 are returned unchanged (8 is already full-width).
 */
static inline uint64_t
sign_extend(uint64_t val, int size)
{
	if (size == 1) {
		if (val & __BIT(7))
			val |= 0xFFFFFFFFFFFFFF00;
	} else if (size == 2) {
		if (val & __BIT(15))
			val |= 0xFFFFFFFFFFFF0000;
	} else if (size == 4) {
		if (val & __BIT(31))
			val |= 0xFFFFFFFF00000000;
	}
	return val;
}

/*
 * Immediate node: read the immediate operand from the instruction stream.
 * imm8 forms encode one byte sign-extended to the operand size; immz forms
 * with a 64-bit operand encode four bytes sign-extended to 64 bits.
 */
static int
node_immediate(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode = instr->opcode;
	struct x86_store *store;
	uint8_t immsize;
	size_t sesize = 0;	/* target size for sign-extension, 0 = none */

	/* The immediate is the source */
	store = &instr->src;
	immsize = instr->operand_size;

	if (opcode->flags & FLAG_imm8) {
		sesize = immsize;
		immsize = 1;
	} else if ((opcode->flags & FLAG_immz) && (immsize == 8)) {
		sesize = immsize;
		immsize = 4;
	}

	store->type = STORE_IMM;
	if (fsm_read(fsm, (uint8_t *)&store->u.imm.data, immsize) == -1) {
		return -1;
	}
	fsm_advance(fsm, immsize, NULL);

	if (sesize != 0) {
		store->u.imm.data = sign_extend(store->u.imm.data, sesize);
	}

	return 0;
}

/*
 * Displacement node: read the 1/2/4-byte displacement selected earlier by
 * get_disp_type()/node_sib(), sign-extend it in 64-bit mode, then chain to
 * the immediate node if the opcode also carries an immediate.
 */
static int
node_disp(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode = instr->opcode;
	uint64_t data = 0;
	size_t n;

	if (instr->strm->disp.type == DISP_1) {
		n = 1;
	} else if (instr->strm->disp.type == DISP_2) {
		n = 2;
	} else if (instr->strm->disp.type == DISP_4) {
		n = 4;
	} else {
		/* DISP_NONE/DISP_0 never reach this node. */
		DISASSEMBLER_BUG();
	}

	if (fsm_read(fsm, (uint8_t *)&data, n) == -1) {
		return -1;
	}

	if (__predict_true(fsm->is64bit)) {
		data = sign_extend(data, n);
	}

	instr->strm->disp.data = data;

	if (opcode->immediate) {
		fsm_advance(fsm, n, node_immediate);
	} else {
		fsm_advance(fsm, n, NULL);
	}

	return 0;
}

/*
 * Special node to handle 16bit addressing encoding, which can reference two
 * registers at once.
2078 1.32 maxv */ 2079 1.32 maxv static int 2080 1.32 maxv node_dual(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2081 1.32 maxv { 2082 1.32 maxv int reg1, reg2; 2083 1.32 maxv 2084 1.32 maxv reg1 = gpr_dual_reg1_rm[instr->regmodrm.rm]; 2085 1.32 maxv 2086 1.32 maxv if (instr->regmodrm.rm == 0b000 || 2087 1.32 maxv instr->regmodrm.rm == 0b010) { 2088 1.32 maxv reg2 = NVMM_X64_GPR_RSI; 2089 1.32 maxv } else if (instr->regmodrm.rm == 0b001 || 2090 1.32 maxv instr->regmodrm.rm == 0b011) { 2091 1.32 maxv reg2 = NVMM_X64_GPR_RDI; 2092 1.32 maxv } else { 2093 1.32 maxv DISASSEMBLER_BUG(); 2094 1.32 maxv } 2095 1.32 maxv 2096 1.32 maxv instr->strm->type = STORE_DUALREG; 2097 1.32 maxv instr->strm->u.dualreg.reg1 = reg1; 2098 1.32 maxv instr->strm->u.dualreg.reg2 = reg2; 2099 1.32 maxv 2100 1.32 maxv if (instr->strm->disp.type == DISP_NONE) { 2101 1.32 maxv DISASSEMBLER_BUG(); 2102 1.32 maxv } else if (instr->strm->disp.type == DISP_0) { 2103 1.32 maxv /* Indirect register addressing mode */ 2104 1.32 maxv if (instr->opcode->immediate) { 2105 1.32 maxv fsm_advance(fsm, 1, node_immediate); 2106 1.32 maxv } else { 2107 1.32 maxv fsm_advance(fsm, 1, NULL); 2108 1.32 maxv } 2109 1.32 maxv } else { 2110 1.32 maxv fsm_advance(fsm, 1, node_disp); 2111 1.32 maxv } 2112 1.32 maxv 2113 1.32 maxv return 0; 2114 1.32 maxv } 2115 1.32 maxv 2116 1.5 maxv static const struct x86_reg * 2117 1.5 maxv get_register_idx(struct x86_instr *instr, uint8_t index) 2118 1.5 maxv { 2119 1.5 maxv uint8_t enc = index; 2120 1.5 maxv const struct x86_reg *reg; 2121 1.5 maxv size_t regsize; 2122 1.5 maxv 2123 1.5 maxv regsize = instr->address_size; 2124 1.5 maxv reg = &gpr_map[instr->rexpref.x][enc][regsize-1]; 2125 1.5 maxv 2126 1.5 maxv if (reg->num == -1) { 2127 1.5 maxv reg = resolve_special_register(instr, enc, regsize); 2128 1.5 maxv } 2129 1.5 maxv 2130 1.5 maxv return reg; 2131 1.5 maxv } 2132 1.5 maxv 2133 1.5 maxv static const struct x86_reg * 2134 1.5 maxv get_register_bas(struct 
x86_instr *instr, uint8_t base) 2135 1.5 maxv { 2136 1.5 maxv uint8_t enc = base; 2137 1.5 maxv const struct x86_reg *reg; 2138 1.5 maxv size_t regsize; 2139 1.5 maxv 2140 1.5 maxv regsize = instr->address_size; 2141 1.5 maxv reg = &gpr_map[instr->rexpref.b][enc][regsize-1]; 2142 1.5 maxv if (reg->num == -1) { 2143 1.5 maxv reg = resolve_special_register(instr, enc, regsize); 2144 1.5 maxv } 2145 1.5 maxv 2146 1.5 maxv return reg; 2147 1.5 maxv } 2148 1.5 maxv 2149 1.5 maxv static int 2150 1.5 maxv node_sib(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2151 1.5 maxv { 2152 1.5 maxv const struct x86_opcode *opcode; 2153 1.5 maxv uint8_t scale, index, base; 2154 1.5 maxv bool noindex, nobase; 2155 1.5 maxv uint8_t byte; 2156 1.5 maxv 2157 1.5 maxv if (fsm_read(fsm, &byte, sizeof(byte)) == -1) { 2158 1.5 maxv return -1; 2159 1.5 maxv } 2160 1.5 maxv 2161 1.5 maxv scale = ((byte & 0b11000000) >> 6); 2162 1.5 maxv index = ((byte & 0b00111000) >> 3); 2163 1.5 maxv base = ((byte & 0b00000111) >> 0); 2164 1.5 maxv 2165 1.5 maxv opcode = instr->opcode; 2166 1.5 maxv 2167 1.5 maxv noindex = false; 2168 1.5 maxv nobase = false; 2169 1.5 maxv 2170 1.5 maxv if (index == 0b100 && !instr->rexpref.x) { 2171 1.5 maxv /* Special case: the index is null */ 2172 1.5 maxv noindex = true; 2173 1.5 maxv } 2174 1.5 maxv 2175 1.5 maxv if (instr->regmodrm.mod == 0b00 && base == 0b101) { 2176 1.5 maxv /* Special case: the base is null + disp32 */ 2177 1.5 maxv instr->strm->disp.type = DISP_4; 2178 1.5 maxv nobase = true; 2179 1.5 maxv } 2180 1.5 maxv 2181 1.5 maxv instr->strm->type = STORE_SIB; 2182 1.5 maxv instr->strm->u.sib.scale = (1 << scale); 2183 1.5 maxv if (!noindex) 2184 1.5 maxv instr->strm->u.sib.idx = get_register_idx(instr, index); 2185 1.5 maxv if (!nobase) 2186 1.5 maxv instr->strm->u.sib.bas = get_register_bas(instr, base); 2187 1.5 maxv 2188 1.5 maxv /* May have a displacement, or an immediate */ 2189 1.32 maxv if (instr->strm->disp.type == DISP_1 || 2190 1.32 maxv 
instr->strm->disp.type == DISP_2 || 2191 1.32 maxv instr->strm->disp.type == DISP_4) { 2192 1.5 maxv fsm_advance(fsm, 1, node_disp); 2193 1.5 maxv } else if (opcode->immediate) { 2194 1.5 maxv fsm_advance(fsm, 1, node_immediate); 2195 1.5 maxv } else { 2196 1.5 maxv fsm_advance(fsm, 1, NULL); 2197 1.5 maxv } 2198 1.5 maxv 2199 1.5 maxv return 0; 2200 1.5 maxv } 2201 1.5 maxv 2202 1.5 maxv static const struct x86_reg * 2203 1.5 maxv get_register_reg(struct x86_instr *instr, const struct x86_opcode *opcode) 2204 1.5 maxv { 2205 1.5 maxv uint8_t enc = instr->regmodrm.reg; 2206 1.5 maxv const struct x86_reg *reg; 2207 1.5 maxv size_t regsize; 2208 1.5 maxv 2209 1.11 maxv regsize = instr->operand_size; 2210 1.5 maxv 2211 1.5 maxv reg = &gpr_map[instr->rexpref.r][enc][regsize-1]; 2212 1.5 maxv if (reg->num == -1) { 2213 1.5 maxv reg = resolve_special_register(instr, enc, regsize); 2214 1.5 maxv } 2215 1.5 maxv 2216 1.5 maxv return reg; 2217 1.5 maxv } 2218 1.5 maxv 2219 1.5 maxv static const struct x86_reg * 2220 1.5 maxv get_register_rm(struct x86_instr *instr, const struct x86_opcode *opcode) 2221 1.5 maxv { 2222 1.5 maxv uint8_t enc = instr->regmodrm.rm; 2223 1.5 maxv const struct x86_reg *reg; 2224 1.5 maxv size_t regsize; 2225 1.5 maxv 2226 1.5 maxv if (instr->strm->disp.type == DISP_NONE) { 2227 1.11 maxv regsize = instr->operand_size; 2228 1.5 maxv } else { 2229 1.5 maxv /* Indirect access, the size is that of the address. 
*/ 2230 1.5 maxv regsize = instr->address_size; 2231 1.5 maxv } 2232 1.5 maxv 2233 1.5 maxv reg = &gpr_map[instr->rexpref.b][enc][regsize-1]; 2234 1.5 maxv if (reg->num == -1) { 2235 1.5 maxv reg = resolve_special_register(instr, enc, regsize); 2236 1.5 maxv } 2237 1.5 maxv 2238 1.5 maxv return reg; 2239 1.5 maxv } 2240 1.5 maxv 2241 1.5 maxv static inline bool 2242 1.5 maxv has_sib(struct x86_instr *instr) 2243 1.5 maxv { 2244 1.32 maxv return (instr->address_size != 2 && /* no SIB in 16bit addressing */ 2245 1.32 maxv instr->regmodrm.mod != 0b11 && 2246 1.32 maxv instr->regmodrm.rm == 0b100); 2247 1.5 maxv } 2248 1.5 maxv 2249 1.5 maxv static inline bool 2250 1.9 maxv is_rip_relative(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2251 1.5 maxv { 2252 1.32 maxv return (fsm->is64bit && /* RIP-relative only in 64bit mode */ 2253 1.32 maxv instr->regmodrm.mod == 0b00 && 2254 1.32 maxv instr->regmodrm.rm == 0b101); 2255 1.9 maxv } 2256 1.9 maxv 2257 1.9 maxv static inline bool 2258 1.9 maxv is_disp32_only(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2259 1.9 maxv { 2260 1.32 maxv return (!fsm->is64bit && /* no disp32-only in 64bit mode */ 2261 1.32 maxv instr->address_size != 2 && /* no disp32-only in 16bit addressing */ 2262 1.32 maxv instr->regmodrm.mod == 0b00 && 2263 1.32 maxv instr->regmodrm.rm == 0b101); 2264 1.32 maxv } 2265 1.32 maxv 2266 1.32 maxv static inline bool 2267 1.32 maxv is_disp16_only(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2268 1.32 maxv { 2269 1.32 maxv return (instr->address_size == 2 && /* disp16-only only in 16bit addr */ 2270 1.32 maxv instr->regmodrm.mod == 0b00 && 2271 1.32 maxv instr->regmodrm.rm == 0b110); 2272 1.32 maxv } 2273 1.32 maxv 2274 1.32 maxv static inline bool 2275 1.32 maxv is_dual(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2276 1.32 maxv { 2277 1.32 maxv return (instr->address_size == 2 && 2278 1.32 maxv instr->regmodrm.mod != 0b11 && 2279 1.32 maxv instr->regmodrm.rm <= 0b011); 2280 1.5 
maxv } 2281 1.5 maxv 2282 1.5 maxv static enum x86_disp_type 2283 1.5 maxv get_disp_type(struct x86_instr *instr) 2284 1.5 maxv { 2285 1.5 maxv switch (instr->regmodrm.mod) { 2286 1.32 maxv case 0b00: /* indirect */ 2287 1.5 maxv return DISP_0; 2288 1.32 maxv case 0b01: /* indirect+1 */ 2289 1.5 maxv return DISP_1; 2290 1.32 maxv case 0b10: /* indirect+{2,4} */ 2291 1.32 maxv if (__predict_false(instr->address_size == 2)) { 2292 1.32 maxv return DISP_2; 2293 1.32 maxv } 2294 1.5 maxv return DISP_4; 2295 1.32 maxv case 0b11: /* direct */ 2296 1.35 maxv default: /* llvm */ 2297 1.5 maxv return DISP_NONE; 2298 1.5 maxv } 2299 1.39 joerg __unreachable(); 2300 1.5 maxv } 2301 1.5 maxv 2302 1.5 maxv static int 2303 1.5 maxv node_regmodrm(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2304 1.5 maxv { 2305 1.5 maxv struct x86_store *strg, *strm; 2306 1.5 maxv const struct x86_opcode *opcode; 2307 1.5 maxv const struct x86_reg *reg; 2308 1.5 maxv uint8_t byte; 2309 1.5 maxv 2310 1.5 maxv if (fsm_read(fsm, &byte, sizeof(byte)) == -1) { 2311 1.5 maxv return -1; 2312 1.5 maxv } 2313 1.5 maxv 2314 1.5 maxv opcode = instr->opcode; 2315 1.5 maxv 2316 1.27 maxv instr->regmodrm.rm = ((byte & 0b00000111) >> 0); 2317 1.27 maxv instr->regmodrm.reg = ((byte & 0b00111000) >> 3); 2318 1.5 maxv instr->regmodrm.mod = ((byte & 0b11000000) >> 6); 2319 1.5 maxv 2320 1.5 maxv if (opcode->regtorm) { 2321 1.5 maxv strg = &instr->src; 2322 1.5 maxv strm = &instr->dst; 2323 1.5 maxv } else { /* RM to REG */ 2324 1.5 maxv strm = &instr->src; 2325 1.5 maxv strg = &instr->dst; 2326 1.5 maxv } 2327 1.5 maxv 2328 1.5 maxv /* Save for later use. */ 2329 1.5 maxv instr->strm = strm; 2330 1.5 maxv 2331 1.5 maxv /* 2332 1.5 maxv * Special cases: Groups. The REG field of REGMODRM is the index in 2333 1.5 maxv * the group. op1 gets overwritten in the Immediate node, if any. 
2334 1.5 maxv */ 2335 1.11 maxv if (opcode->group1) { 2336 1.11 maxv if (group1[instr->regmodrm.reg].emul == NULL) { 2337 1.11 maxv return -1; 2338 1.11 maxv } 2339 1.11 maxv instr->emul = group1[instr->regmodrm.reg].emul; 2340 1.19 maxv } else if (opcode->group3) { 2341 1.19 maxv if (group3[instr->regmodrm.reg].emul == NULL) { 2342 1.19 maxv return -1; 2343 1.19 maxv } 2344 1.19 maxv instr->emul = group3[instr->regmodrm.reg].emul; 2345 1.11 maxv } else if (opcode->group11) { 2346 1.5 maxv if (group11[instr->regmodrm.reg].emul == NULL) { 2347 1.5 maxv return -1; 2348 1.5 maxv } 2349 1.5 maxv instr->emul = group11[instr->regmodrm.reg].emul; 2350 1.5 maxv } 2351 1.5 maxv 2352 1.16 maxv if (!opcode->immediate) { 2353 1.16 maxv reg = get_register_reg(instr, opcode); 2354 1.16 maxv if (reg == NULL) { 2355 1.16 maxv return -1; 2356 1.16 maxv } 2357 1.16 maxv strg->type = STORE_REG; 2358 1.16 maxv strg->u.reg = reg; 2359 1.5 maxv } 2360 1.5 maxv 2361 1.24 maxv /* The displacement applies to RM. 
*/ 2362 1.24 maxv strm->disp.type = get_disp_type(instr); 2363 1.24 maxv 2364 1.5 maxv if (has_sib(instr)) { 2365 1.5 maxv /* Overwrites RM */ 2366 1.5 maxv fsm_advance(fsm, 1, node_sib); 2367 1.5 maxv return 0; 2368 1.5 maxv } 2369 1.5 maxv 2370 1.9 maxv if (is_rip_relative(fsm, instr)) { 2371 1.5 maxv /* Overwrites RM */ 2372 1.5 maxv strm->type = STORE_REG; 2373 1.5 maxv strm->u.reg = &gpr_map__rip; 2374 1.5 maxv strm->disp.type = DISP_4; 2375 1.5 maxv fsm_advance(fsm, 1, node_disp); 2376 1.5 maxv return 0; 2377 1.5 maxv } 2378 1.5 maxv 2379 1.9 maxv if (is_disp32_only(fsm, instr)) { 2380 1.9 maxv /* Overwrites RM */ 2381 1.9 maxv strm->type = STORE_REG; 2382 1.9 maxv strm->u.reg = NULL; 2383 1.9 maxv strm->disp.type = DISP_4; 2384 1.9 maxv fsm_advance(fsm, 1, node_disp); 2385 1.9 maxv return 0; 2386 1.9 maxv } 2387 1.9 maxv 2388 1.32 maxv if (__predict_false(is_disp16_only(fsm, instr))) { 2389 1.32 maxv /* Overwrites RM */ 2390 1.32 maxv strm->type = STORE_REG; 2391 1.32 maxv strm->u.reg = NULL; 2392 1.32 maxv strm->disp.type = DISP_2; 2393 1.32 maxv fsm_advance(fsm, 1, node_disp); 2394 1.32 maxv return 0; 2395 1.32 maxv } 2396 1.32 maxv 2397 1.32 maxv if (__predict_false(is_dual(fsm, instr))) { 2398 1.32 maxv /* Overwrites RM */ 2399 1.32 maxv fsm_advance(fsm, 0, node_dual); 2400 1.32 maxv return 0; 2401 1.32 maxv } 2402 1.32 maxv 2403 1.5 maxv reg = get_register_rm(instr, opcode); 2404 1.5 maxv if (reg == NULL) { 2405 1.5 maxv return -1; 2406 1.5 maxv } 2407 1.5 maxv strm->type = STORE_REG; 2408 1.5 maxv strm->u.reg = reg; 2409 1.5 maxv 2410 1.5 maxv if (strm->disp.type == DISP_NONE) { 2411 1.5 maxv /* Direct register addressing mode */ 2412 1.5 maxv if (opcode->immediate) { 2413 1.5 maxv fsm_advance(fsm, 1, node_immediate); 2414 1.5 maxv } else { 2415 1.5 maxv fsm_advance(fsm, 1, NULL); 2416 1.5 maxv } 2417 1.5 maxv } else if (strm->disp.type == DISP_0) { 2418 1.5 maxv /* Indirect register addressing mode */ 2419 1.5 maxv if (opcode->immediate) { 2420 1.5 
maxv fsm_advance(fsm, 1, node_immediate); 2421 1.5 maxv } else { 2422 1.5 maxv fsm_advance(fsm, 1, NULL); 2423 1.5 maxv } 2424 1.5 maxv } else { 2425 1.5 maxv fsm_advance(fsm, 1, node_disp); 2426 1.5 maxv } 2427 1.5 maxv 2428 1.5 maxv return 0; 2429 1.5 maxv } 2430 1.5 maxv 2431 1.5 maxv static size_t 2432 1.5 maxv get_operand_size(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2433 1.5 maxv { 2434 1.5 maxv const struct x86_opcode *opcode = instr->opcode; 2435 1.5 maxv int opsize; 2436 1.5 maxv 2437 1.5 maxv /* Get the opsize */ 2438 1.5 maxv if (!opcode->szoverride) { 2439 1.5 maxv opsize = opcode->defsize; 2440 1.5 maxv } else if (instr->rexpref.present && instr->rexpref.w) { 2441 1.5 maxv opsize = 8; 2442 1.5 maxv } else { 2443 1.5 maxv if (!fsm->is16bit) { 2444 1.13 maxv if (instr->legpref.opr_ovr) { 2445 1.5 maxv opsize = 2; 2446 1.5 maxv } else { 2447 1.5 maxv opsize = 4; 2448 1.5 maxv } 2449 1.5 maxv } else { /* 16bit */ 2450 1.13 maxv if (instr->legpref.opr_ovr) { 2451 1.5 maxv opsize = 4; 2452 1.5 maxv } else { 2453 1.5 maxv opsize = 2; 2454 1.5 maxv } 2455 1.5 maxv } 2456 1.5 maxv } 2457 1.5 maxv 2458 1.5 maxv return opsize; 2459 1.5 maxv } 2460 1.5 maxv 2461 1.5 maxv static size_t 2462 1.5 maxv get_address_size(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2463 1.5 maxv { 2464 1.5 maxv if (fsm->is64bit) { 2465 1.13 maxv if (__predict_false(instr->legpref.adr_ovr)) { 2466 1.5 maxv return 4; 2467 1.5 maxv } 2468 1.5 maxv return 8; 2469 1.5 maxv } 2470 1.5 maxv 2471 1.5 maxv if (fsm->is32bit) { 2472 1.13 maxv if (__predict_false(instr->legpref.adr_ovr)) { 2473 1.5 maxv return 2; 2474 1.5 maxv } 2475 1.5 maxv return 4; 2476 1.5 maxv } 2477 1.5 maxv 2478 1.5 maxv /* 16bit. 
 */
	if (__predict_false(instr->legpref.adr_ovr)) {
		return 4;
	}
	return 2;
}

/*
 * Decode a one-byte (primary map) opcode, then dispatch to the node that
 * parses the remainder of the encoding: ModRM, direct-memory offset, or one
 * of the string-instruction specials (STOS/LODS, MOVS, CMPS).
 */
static int
node_primary_opcode(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode;
	uint8_t byte;

	if (fsm_read(fsm, &byte, sizeof(byte)) == -1) {
		return -1;
	}

	opcode = &primary_opcode_table[byte];
	if (__predict_false(!opcode->valid)) {
		return -1;
	}

	/* Opcode identified: record it and fix the operand/address sizes. */
	instr->opcode = opcode;
	instr->emul = opcode->emul;
	instr->operand_size = get_operand_size(fsm, instr);
	instr->address_size = get_address_size(fsm, instr);

	if (fsm->is64bit && (instr->operand_size == 4)) {
		/* Zero-extend to 64 bits. */
		instr->zeroextend_mask = ~size_to_mask(4);
	}

	if (opcode->regmodrm) {
		fsm_advance(fsm, 1, node_regmodrm);
	} else if (opcode->dmo) {
		/* Direct-Memory Offsets */
		fsm_advance(fsm, 1, node_dmo);
	} else if (opcode->stos || opcode->lods) {
		fsm_advance(fsm, 1, node_stlo);
	} else if (opcode->movs) {
		fsm_advance(fsm, 1, node_movs);
	} else if (opcode->cmps) {
		fsm_advance(fsm, 1, node_cmps);
	} else {
		return -1;
	}

	return 0;
}

/*
 * Decode a two-byte (0x0F escape, secondary map) opcode. Only ModRM forms
 * exist in this table.
 */
static int
node_secondary_opcode(struct x86_decode_fsm *fsm, struct x86_instr *instr)
{
	const struct x86_opcode *opcode;
	uint8_t byte;

	if (fsm_read(fsm, &byte, sizeof(byte)) == -1)
{ 2535 1.10 maxv return -1; 2536 1.10 maxv } 2537 1.10 maxv 2538 1.27 maxv opcode = &secondary_opcode_table[byte]; 2539 1.27 maxv if (__predict_false(!opcode->valid)) { 2540 1.10 maxv return -1; 2541 1.10 maxv } 2542 1.10 maxv 2543 1.10 maxv instr->opcode = opcode; 2544 1.10 maxv instr->emul = opcode->emul; 2545 1.10 maxv instr->operand_size = get_operand_size(fsm, instr); 2546 1.10 maxv instr->address_size = get_address_size(fsm, instr); 2547 1.10 maxv 2548 1.18 maxv if (fsm->is64bit && (instr->operand_size == 4)) { 2549 1.18 maxv /* Zero-extend to 64 bits. */ 2550 1.18 maxv instr->zeroextend_mask = ~size_to_mask(4); 2551 1.18 maxv } 2552 1.18 maxv 2553 1.11 maxv if (opcode->flags & FLAG_ze) { 2554 1.10 maxv /* 2555 1.10 maxv * Compute the mask for zero-extend. Update the operand size, 2556 1.10 maxv * we move fewer bytes. 2557 1.10 maxv */ 2558 1.18 maxv instr->zeroextend_mask |= size_to_mask(instr->operand_size); 2559 1.10 maxv instr->zeroextend_mask &= ~size_to_mask(opcode->defsize); 2560 1.10 maxv instr->operand_size = opcode->defsize; 2561 1.10 maxv } 2562 1.10 maxv 2563 1.10 maxv if (opcode->regmodrm) { 2564 1.10 maxv fsm_advance(fsm, 1, node_regmodrm); 2565 1.10 maxv } else { 2566 1.10 maxv return -1; 2567 1.10 maxv } 2568 1.10 maxv 2569 1.10 maxv return 0; 2570 1.10 maxv } 2571 1.10 maxv 2572 1.5 maxv static int 2573 1.5 maxv node_main(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2574 1.5 maxv { 2575 1.5 maxv uint8_t byte; 2576 1.5 maxv 2577 1.5 maxv #define ESCAPE 0x0F 2578 1.5 maxv #define VEX_1 0xC5 2579 1.5 maxv #define VEX_2 0xC4 2580 1.5 maxv #define XOP 0x8F 2581 1.5 maxv 2582 1.5 maxv if (fsm_read(fsm, &byte, sizeof(byte)) == -1) { 2583 1.5 maxv return -1; 2584 1.5 maxv } 2585 1.5 maxv 2586 1.5 maxv /* 2587 1.5 maxv * We don't take XOP. It is AMD-specific, and it was removed shortly 2588 1.5 maxv * after being introduced. 
2589 1.5 maxv */ 2590 1.5 maxv if (byte == ESCAPE) { 2591 1.10 maxv fsm_advance(fsm, 1, node_secondary_opcode); 2592 1.5 maxv } else if (!instr->rexpref.present) { 2593 1.5 maxv if (byte == VEX_1) { 2594 1.5 maxv return -1; 2595 1.5 maxv } else if (byte == VEX_2) { 2596 1.5 maxv return -1; 2597 1.5 maxv } else { 2598 1.5 maxv fsm->fn = node_primary_opcode; 2599 1.5 maxv } 2600 1.5 maxv } else { 2601 1.5 maxv fsm->fn = node_primary_opcode; 2602 1.5 maxv } 2603 1.5 maxv 2604 1.5 maxv return 0; 2605 1.5 maxv } 2606 1.5 maxv 2607 1.5 maxv static int 2608 1.5 maxv node_rex_prefix(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2609 1.5 maxv { 2610 1.5 maxv struct x86_rexpref *rexpref = &instr->rexpref; 2611 1.5 maxv uint8_t byte; 2612 1.5 maxv size_t n = 0; 2613 1.5 maxv 2614 1.5 maxv if (fsm_read(fsm, &byte, sizeof(byte)) == -1) { 2615 1.5 maxv return -1; 2616 1.5 maxv } 2617 1.5 maxv 2618 1.5 maxv if (byte >= 0x40 && byte <= 0x4F) { 2619 1.5 maxv if (__predict_false(!fsm->is64bit)) { 2620 1.5 maxv return -1; 2621 1.5 maxv } 2622 1.27 maxv rexpref->b = ((byte & 0x1) != 0); 2623 1.27 maxv rexpref->x = ((byte & 0x2) != 0); 2624 1.27 maxv rexpref->r = ((byte & 0x4) != 0); 2625 1.27 maxv rexpref->w = ((byte & 0x8) != 0); 2626 1.5 maxv rexpref->present = true; 2627 1.5 maxv n = 1; 2628 1.5 maxv } 2629 1.5 maxv 2630 1.5 maxv fsm_advance(fsm, n, node_main); 2631 1.5 maxv return 0; 2632 1.5 maxv } 2633 1.5 maxv 2634 1.5 maxv static int 2635 1.5 maxv node_legacy_prefix(struct x86_decode_fsm *fsm, struct x86_instr *instr) 2636 1.5 maxv { 2637 1.5 maxv uint8_t byte; 2638 1.5 maxv 2639 1.5 maxv if (fsm_read(fsm, &byte, sizeof(byte)) == -1) { 2640 1.5 maxv return -1; 2641 1.5 maxv } 2642 1.5 maxv 2643 1.13 maxv if (byte == LEG_OPR_OVR) { 2644 1.13 maxv instr->legpref.opr_ovr = 1; 2645 1.13 maxv } else if (byte == LEG_OVR_DS) { 2646 1.13 maxv instr->legpref.seg = NVMM_X64_SEG_DS; 2647 1.13 maxv } else if (byte == LEG_OVR_ES) { 2648 1.13 maxv instr->legpref.seg = 
NVMM_X64_SEG_ES; 2649 1.13 maxv } else if (byte == LEG_REP) { 2650 1.13 maxv instr->legpref.rep = 1; 2651 1.13 maxv } else if (byte == LEG_OVR_GS) { 2652 1.13 maxv instr->legpref.seg = NVMM_X64_SEG_GS; 2653 1.13 maxv } else if (byte == LEG_OVR_FS) { 2654 1.13 maxv instr->legpref.seg = NVMM_X64_SEG_FS; 2655 1.13 maxv } else if (byte == LEG_ADR_OVR) { 2656 1.13 maxv instr->legpref.adr_ovr = 1; 2657 1.13 maxv } else if (byte == LEG_OVR_CS) { 2658 1.13 maxv instr->legpref.seg = NVMM_X64_SEG_CS; 2659 1.13 maxv } else if (byte == LEG_OVR_SS) { 2660 1.13 maxv instr->legpref.seg = NVMM_X64_SEG_SS; 2661 1.13 maxv } else if (byte == LEG_REPN) { 2662 1.13 maxv instr->legpref.repn = 1; 2663 1.13 maxv } else if (byte == LEG_LOCK) { 2664 1.13 maxv /* ignore */ 2665 1.5 maxv } else { 2666 1.13 maxv /* not a legacy prefix */ 2667 1.13 maxv fsm_advance(fsm, 0, node_rex_prefix); 2668 1.13 maxv return 0; 2669 1.5 maxv } 2670 1.5 maxv 2671 1.13 maxv fsm_advance(fsm, 1, node_legacy_prefix); 2672 1.5 maxv return 0; 2673 1.5 maxv } 2674 1.5 maxv 2675 1.5 maxv static int 2676 1.5 maxv x86_decode(uint8_t *inst_bytes, size_t inst_len, struct x86_instr *instr, 2677 1.5 maxv struct nvmm_x64_state *state) 2678 1.5 maxv { 2679 1.5 maxv struct x86_decode_fsm fsm; 2680 1.5 maxv int ret; 2681 1.5 maxv 2682 1.5 maxv memset(instr, 0, sizeof(*instr)); 2683 1.13 maxv instr->legpref.seg = -1; 2684 1.25 maxv instr->src.hardseg = -1; 2685 1.25 maxv instr->dst.hardseg = -1; 2686 1.5 maxv 2687 1.5 maxv fsm.is64bit = is_64bit(state); 2688 1.5 maxv fsm.is32bit = is_32bit(state); 2689 1.5 maxv fsm.is16bit = is_16bit(state); 2690 1.5 maxv 2691 1.5 maxv fsm.fn = node_legacy_prefix; 2692 1.5 maxv fsm.buf = inst_bytes; 2693 1.5 maxv fsm.end = inst_bytes + inst_len; 2694 1.5 maxv 2695 1.5 maxv while (fsm.fn != NULL) { 2696 1.5 maxv ret = (*fsm.fn)(&fsm, instr); 2697 1.43 reinoud if (ret == -1) { 2698 1.43 reinoud #ifdef NVMM_DEBUG 2699 1.43 reinoud printf("\n%s debug: unrecognized instruction found " \ 2700 1.43 
reinoud "with max length %ld : [ ", __func__, inst_len); 2701 1.43 reinoud for (uint i = 0; i < inst_len; i++) 2702 1.43 reinoud printf("%02x ", inst_bytes[i]); 2703 1.43 reinoud printf("]\n"); 2704 1.43 reinoud fflush(stdout); 2705 1.43 reinoud #endif 2706 1.5 maxv return -1; 2707 1.43 reinoud } 2708 1.5 maxv } 2709 1.5 maxv 2710 1.5 maxv instr->len = fsm.buf - inst_bytes; 2711 1.5 maxv 2712 1.5 maxv return 0; 2713 1.5 maxv } 2714 1.5 maxv 2715 1.5 maxv /* -------------------------------------------------------------------------- */ 2716 1.5 maxv 2717 1.19 maxv #define EXEC_INSTR(sz, instr) \ 2718 1.19 maxv static uint##sz##_t \ 2719 1.20 christos exec_##instr##sz(uint##sz##_t op1, uint##sz##_t op2, uint64_t *rflags) \ 2720 1.19 maxv { \ 2721 1.19 maxv uint##sz##_t res; \ 2722 1.19 maxv __asm __volatile ( \ 2723 1.33 maxv #instr" %2, %3;" \ 2724 1.33 maxv "mov %3, %1;" \ 2725 1.19 maxv "pushfq;" \ 2726 1.33 maxv "popq %0" \ 2727 1.19 maxv : "=r" (*rflags), "=r" (res) \ 2728 1.19 maxv : "r" (op1), "r" (op2)); \ 2729 1.19 maxv return res; \ 2730 1.19 maxv } 2731 1.19 maxv 2732 1.19 maxv #define EXEC_DISPATCHER(instr) \ 2733 1.19 maxv static uint64_t \ 2734 1.19 maxv exec_##instr(uint64_t op1, uint64_t op2, uint64_t *rflags, size_t opsize) \ 2735 1.19 maxv { \ 2736 1.19 maxv switch (opsize) { \ 2737 1.19 maxv case 1: \ 2738 1.19 maxv return exec_##instr##8(op1, op2, rflags); \ 2739 1.19 maxv case 2: \ 2740 1.19 maxv return exec_##instr##16(op1, op2, rflags); \ 2741 1.19 maxv case 4: \ 2742 1.19 maxv return exec_##instr##32(op1, op2, rflags); \ 2743 1.19 maxv default: \ 2744 1.19 maxv return exec_##instr##64(op1, op2, rflags); \ 2745 1.19 maxv } \ 2746 1.19 maxv } 2747 1.19 maxv 2748 1.19 maxv /* SUB: ret = op1 - op2 */ 2749 1.19 maxv #define PSL_SUB_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF|PSL_AF) 2750 1.19 maxv EXEC_INSTR(8, sub) 2751 1.19 maxv EXEC_INSTR(16, sub) 2752 1.19 maxv EXEC_INSTR(32, sub) 2753 1.19 maxv EXEC_INSTR(64, sub) 2754 1.19 maxv EXEC_DISPATCHER(sub) 
2755 1.19 maxv 2756 1.19 maxv /* OR: ret = op1 | op2 */ 2757 1.19 maxv #define PSL_OR_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF) 2758 1.19 maxv EXEC_INSTR(8, or) 2759 1.19 maxv EXEC_INSTR(16, or) 2760 1.19 maxv EXEC_INSTR(32, or) 2761 1.19 maxv EXEC_INSTR(64, or) 2762 1.19 maxv EXEC_DISPATCHER(or) 2763 1.19 maxv 2764 1.19 maxv /* AND: ret = op1 & op2 */ 2765 1.19 maxv #define PSL_AND_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF) 2766 1.19 maxv EXEC_INSTR(8, and) 2767 1.19 maxv EXEC_INSTR(16, and) 2768 1.19 maxv EXEC_INSTR(32, and) 2769 1.19 maxv EXEC_INSTR(64, and) 2770 1.19 maxv EXEC_DISPATCHER(and) 2771 1.19 maxv 2772 1.19 maxv /* XOR: ret = op1 ^ op2 */ 2773 1.19 maxv #define PSL_XOR_MASK (PSL_V|PSL_C|PSL_Z|PSL_N|PSL_PF) 2774 1.19 maxv EXEC_INSTR(8, xor) 2775 1.19 maxv EXEC_INSTR(16, xor) 2776 1.19 maxv EXEC_INSTR(32, xor) 2777 1.19 maxv EXEC_INSTR(64, xor) 2778 1.19 maxv EXEC_DISPATCHER(xor) 2779 1.19 maxv 2780 1.19 maxv /* -------------------------------------------------------------------------- */ 2781 1.5 maxv 2782 1.19 maxv /* 2783 1.19 maxv * Emulation functions. We don't care about the order of the operands, except 2784 1.33 maxv * for SUB, CMP and TEST. For these ones we look at mem->write to determine who 2785 1.19 maxv * is op1 and who is op2. 2786 1.19 maxv */ 2787 1.5 maxv 2788 1.5 maxv static void 2789 1.37 maxv x86_func_or(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2790 1.5 maxv { 2791 1.19 maxv uint64_t *retval = (uint64_t *)mem->data; 2792 1.5 maxv const bool write = mem->write; 2793 1.19 maxv uint64_t *op1, op2, fl, ret; 2794 1.5 maxv 2795 1.19 maxv op1 = (uint64_t *)mem->data; 2796 1.19 maxv op2 = 0; 2797 1.5 maxv 2798 1.19 maxv /* Fetch the value to be OR'ed (op2). */ 2799 1.19 maxv mem->data = (uint8_t *)&op2; 2800 1.5 maxv mem->write = false; 2801 1.37 maxv (*vcpu->cbs.mem)(mem); 2802 1.5 maxv 2803 1.5 maxv /* Perform the OR. 
*/ 2804 1.19 maxv ret = exec_or(*op1, op2, &fl, mem->size); 2805 1.5 maxv 2806 1.5 maxv if (write) { 2807 1.5 maxv /* Write back the result. */ 2808 1.19 maxv mem->data = (uint8_t *)&ret; 2809 1.5 maxv mem->write = true; 2810 1.37 maxv (*vcpu->cbs.mem)(mem); 2811 1.19 maxv } else { 2812 1.19 maxv /* Return data to the caller. */ 2813 1.19 maxv *retval = ret; 2814 1.5 maxv } 2815 1.5 maxv 2816 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_OR_MASK; 2817 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_OR_MASK); 2818 1.5 maxv } 2819 1.5 maxv 2820 1.5 maxv static void 2821 1.37 maxv x86_func_and(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2822 1.5 maxv { 2823 1.19 maxv uint64_t *retval = (uint64_t *)mem->data; 2824 1.5 maxv const bool write = mem->write; 2825 1.19 maxv uint64_t *op1, op2, fl, ret; 2826 1.5 maxv 2827 1.19 maxv op1 = (uint64_t *)mem->data; 2828 1.19 maxv op2 = 0; 2829 1.5 maxv 2830 1.19 maxv /* Fetch the value to be AND'ed (op2). */ 2831 1.19 maxv mem->data = (uint8_t *)&op2; 2832 1.5 maxv mem->write = false; 2833 1.37 maxv (*vcpu->cbs.mem)(mem); 2834 1.5 maxv 2835 1.5 maxv /* Perform the AND. */ 2836 1.19 maxv ret = exec_and(*op1, op2, &fl, mem->size); 2837 1.5 maxv 2838 1.5 maxv if (write) { 2839 1.5 maxv /* Write back the result. */ 2840 1.19 maxv mem->data = (uint8_t *)&ret; 2841 1.5 maxv mem->write = true; 2842 1.37 maxv (*vcpu->cbs.mem)(mem); 2843 1.19 maxv } else { 2844 1.19 maxv /* Return data to the caller. */ 2845 1.19 maxv *retval = ret; 2846 1.5 maxv } 2847 1.5 maxv 2848 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_AND_MASK; 2849 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_AND_MASK); 2850 1.5 maxv } 2851 1.5 maxv 2852 1.5 maxv static void 2853 1.37 maxv x86_func_xchg(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2854 1.33 maxv { 2855 1.33 maxv uint64_t *op1, op2; 2856 1.33 maxv 2857 1.33 maxv op1 = (uint64_t *)mem->data; 2858 1.33 maxv op2 = 0; 2859 1.33 maxv 2860 1.33 maxv /* Fetch op2. 
*/ 2861 1.33 maxv mem->data = (uint8_t *)&op2; 2862 1.33 maxv mem->write = false; 2863 1.37 maxv (*vcpu->cbs.mem)(mem); 2864 1.33 maxv 2865 1.33 maxv /* Write op1 in op2. */ 2866 1.33 maxv mem->data = (uint8_t *)op1; 2867 1.33 maxv mem->write = true; 2868 1.37 maxv (*vcpu->cbs.mem)(mem); 2869 1.33 maxv 2870 1.33 maxv /* Write op2 in op1. */ 2871 1.33 maxv *op1 = op2; 2872 1.33 maxv } 2873 1.33 maxv 2874 1.33 maxv static void 2875 1.37 maxv x86_func_sub(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2876 1.5 maxv { 2877 1.19 maxv uint64_t *retval = (uint64_t *)mem->data; 2878 1.5 maxv const bool write = mem->write; 2879 1.19 maxv uint64_t *op1, *op2, fl, ret; 2880 1.19 maxv uint64_t tmp; 2881 1.19 maxv bool memop1; 2882 1.19 maxv 2883 1.19 maxv memop1 = !mem->write; 2884 1.19 maxv op1 = memop1 ? &tmp : (uint64_t *)mem->data; 2885 1.19 maxv op2 = memop1 ? (uint64_t *)mem->data : &tmp; 2886 1.19 maxv 2887 1.19 maxv /* Fetch the value to be SUB'ed (op1 or op2). */ 2888 1.19 maxv mem->data = (uint8_t *)&tmp; 2889 1.19 maxv mem->write = false; 2890 1.37 maxv (*vcpu->cbs.mem)(mem); 2891 1.19 maxv 2892 1.19 maxv /* Perform the SUB. */ 2893 1.19 maxv ret = exec_sub(*op1, *op2, &fl, mem->size); 2894 1.19 maxv 2895 1.19 maxv if (write) { 2896 1.19 maxv /* Write back the result. */ 2897 1.19 maxv mem->data = (uint8_t *)&ret; 2898 1.19 maxv mem->write = true; 2899 1.37 maxv (*vcpu->cbs.mem)(mem); 2900 1.19 maxv } else { 2901 1.19 maxv /* Return data to the caller. 
*/ 2902 1.19 maxv *retval = ret; 2903 1.19 maxv } 2904 1.19 maxv 2905 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK; 2906 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK); 2907 1.19 maxv } 2908 1.5 maxv 2909 1.19 maxv static void 2910 1.37 maxv x86_func_xor(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2911 1.19 maxv { 2912 1.19 maxv uint64_t *retval = (uint64_t *)mem->data; 2913 1.19 maxv const bool write = mem->write; 2914 1.19 maxv uint64_t *op1, op2, fl, ret; 2915 1.5 maxv 2916 1.19 maxv op1 = (uint64_t *)mem->data; 2917 1.19 maxv op2 = 0; 2918 1.5 maxv 2919 1.19 maxv /* Fetch the value to be XOR'ed (op2). */ 2920 1.19 maxv mem->data = (uint8_t *)&op2; 2921 1.5 maxv mem->write = false; 2922 1.37 maxv (*vcpu->cbs.mem)(mem); 2923 1.5 maxv 2924 1.5 maxv /* Perform the XOR. */ 2925 1.19 maxv ret = exec_xor(*op1, op2, &fl, mem->size); 2926 1.5 maxv 2927 1.5 maxv if (write) { 2928 1.5 maxv /* Write back the result. */ 2929 1.19 maxv mem->data = (uint8_t *)&ret; 2930 1.5 maxv mem->write = true; 2931 1.37 maxv (*vcpu->cbs.mem)(mem); 2932 1.19 maxv } else { 2933 1.19 maxv /* Return data to the caller. */ 2934 1.19 maxv *retval = ret; 2935 1.5 maxv } 2936 1.5 maxv 2937 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_XOR_MASK; 2938 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_XOR_MASK); 2939 1.5 maxv } 2940 1.5 maxv 2941 1.5 maxv static void 2942 1.37 maxv x86_func_cmp(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2943 1.19 maxv { 2944 1.19 maxv uint64_t *op1, *op2, fl; 2945 1.19 maxv uint64_t tmp; 2946 1.19 maxv bool memop1; 2947 1.19 maxv 2948 1.19 maxv memop1 = !mem->write; 2949 1.19 maxv op1 = memop1 ? &tmp : (uint64_t *)mem->data; 2950 1.19 maxv op2 = memop1 ? (uint64_t *)mem->data : &tmp; 2951 1.19 maxv 2952 1.19 maxv /* Fetch the value to be CMP'ed (op1 or op2). 
*/ 2953 1.19 maxv mem->data = (uint8_t *)&tmp; 2954 1.19 maxv mem->write = false; 2955 1.37 maxv (*vcpu->cbs.mem)(mem); 2956 1.19 maxv 2957 1.19 maxv /* Perform the CMP. */ 2958 1.19 maxv exec_sub(*op1, *op2, &fl, mem->size); 2959 1.19 maxv 2960 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK; 2961 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK); 2962 1.19 maxv } 2963 1.19 maxv 2964 1.19 maxv static void 2965 1.37 maxv x86_func_test(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2966 1.19 maxv { 2967 1.19 maxv uint64_t *op1, *op2, fl; 2968 1.19 maxv uint64_t tmp; 2969 1.19 maxv bool memop1; 2970 1.19 maxv 2971 1.19 maxv memop1 = !mem->write; 2972 1.19 maxv op1 = memop1 ? &tmp : (uint64_t *)mem->data; 2973 1.19 maxv op2 = memop1 ? (uint64_t *)mem->data : &tmp; 2974 1.19 maxv 2975 1.19 maxv /* Fetch the value to be TEST'ed (op1 or op2). */ 2976 1.19 maxv mem->data = (uint8_t *)&tmp; 2977 1.19 maxv mem->write = false; 2978 1.37 maxv (*vcpu->cbs.mem)(mem); 2979 1.19 maxv 2980 1.19 maxv /* Perform the TEST. */ 2981 1.19 maxv exec_and(*op1, *op2, &fl, mem->size); 2982 1.19 maxv 2983 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_AND_MASK; 2984 1.19 maxv gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_AND_MASK); 2985 1.19 maxv } 2986 1.19 maxv 2987 1.19 maxv static void 2988 1.37 maxv x86_func_mov(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2989 1.5 maxv { 2990 1.5 maxv /* 2991 1.5 maxv * Nothing special, just move without emulation. 2992 1.5 maxv */ 2993 1.37 maxv (*vcpu->cbs.mem)(mem); 2994 1.5 maxv } 2995 1.5 maxv 2996 1.5 maxv static void 2997 1.37 maxv x86_func_stos(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 2998 1.5 maxv { 2999 1.5 maxv /* 3000 1.5 maxv * Just move, and update RDI. 
3001 1.5 maxv */ 3002 1.37 maxv (*vcpu->cbs.mem)(mem); 3003 1.5 maxv 3004 1.5 maxv if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) { 3005 1.5 maxv gprs[NVMM_X64_GPR_RDI] -= mem->size; 3006 1.5 maxv } else { 3007 1.5 maxv gprs[NVMM_X64_GPR_RDI] += mem->size; 3008 1.5 maxv } 3009 1.5 maxv } 3010 1.5 maxv 3011 1.5 maxv static void 3012 1.37 maxv x86_func_lods(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs) 3013 1.5 maxv { 3014 1.5 maxv /* 3015 1.5 maxv * Just move, and update RSI. 3016 1.5 maxv */ 3017 1.37 maxv (*vcpu->cbs.mem)(mem); 3018 1.5 maxv 3019 1.5 maxv if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) { 3020 1.5 maxv gprs[NVMM_X64_GPR_RSI] -= mem->size; 3021 1.5 maxv } else { 3022 1.5 maxv gprs[NVMM_X64_GPR_RSI] += mem->size; 3023 1.5 maxv } 3024 1.5 maxv } 3025 1.5 maxv 3026 1.5 maxv /* -------------------------------------------------------------------------- */ 3027 1.5 maxv 3028 1.5 maxv static inline uint64_t 3029 1.5 maxv gpr_read_address(struct x86_instr *instr, struct nvmm_x64_state *state, int gpr) 3030 1.5 maxv { 3031 1.5 maxv uint64_t val; 3032 1.5 maxv 3033 1.5 maxv val = state->gprs[gpr]; 3034 1.15 maxv val &= size_to_mask(instr->address_size); 3035 1.5 maxv 3036 1.5 maxv return val; 3037 1.5 maxv } 3038 1.5 maxv 3039 1.5 maxv static int 3040 1.6 maxv store_to_gva(struct nvmm_x64_state *state, struct x86_instr *instr, 3041 1.6 maxv struct x86_store *store, gvaddr_t *gvap, size_t size) 3042 1.5 maxv { 3043 1.5 maxv struct x86_sib *sib; 3044 1.6 maxv gvaddr_t gva = 0; 3045 1.5 maxv uint64_t reg; 3046 1.5 maxv int ret, seg; 3047 1.5 maxv 3048 1.5 maxv if (store->type == STORE_SIB) { 3049 1.5 maxv sib = &store->u.sib; 3050 1.5 maxv if (sib->bas != NULL) 3051 1.5 maxv gva += gpr_read_address(instr, state, sib->bas->num); 3052 1.5 maxv if (sib->idx != NULL) { 3053 1.5 maxv reg = gpr_read_address(instr, state, sib->idx->num); 3054 1.5 maxv gva += sib->scale * reg; 3055 1.5 maxv } 3056 1.5 maxv } else if (store->type == STORE_REG) { 3057 1.9 maxv if 
(store->u.reg == NULL) { 3058 1.32 maxv /* The base is null. Happens with disp32-only and 3059 1.32 maxv * disp16-only. */ 3060 1.9 maxv } else { 3061 1.9 maxv gva = gpr_read_address(instr, state, store->u.reg->num); 3062 1.9 maxv } 3063 1.32 maxv } else if (store->type == STORE_DUALREG) { 3064 1.32 maxv gva = gpr_read_address(instr, state, store->u.dualreg.reg1) + 3065 1.32 maxv gpr_read_address(instr, state, store->u.dualreg.reg2); 3066 1.5 maxv } else { 3067 1.5 maxv gva = store->u.dmo; 3068 1.5 maxv } 3069 1.5 maxv 3070 1.5 maxv if (store->disp.type != DISP_NONE) { 3071 1.11 maxv gva += store->disp.data; 3072 1.5 maxv } 3073 1.5 maxv 3074 1.25 maxv if (store->hardseg != -1) { 3075 1.15 maxv seg = store->hardseg; 3076 1.15 maxv } else { 3077 1.15 maxv if (__predict_false(instr->legpref.seg != -1)) { 3078 1.15 maxv seg = instr->legpref.seg; 3079 1.5 maxv } else { 3080 1.15 maxv seg = NVMM_X64_SEG_DS; 3081 1.5 maxv } 3082 1.15 maxv } 3083 1.5 maxv 3084 1.15 maxv if (__predict_true(is_long_mode(state))) { 3085 1.15 maxv if (seg == NVMM_X64_SEG_GS || seg == NVMM_X64_SEG_FS) { 3086 1.15 maxv segment_apply(&state->segs[seg], &gva); 3087 1.15 maxv } 3088 1.15 maxv } else { 3089 1.15 maxv ret = segment_check(&state->segs[seg], gva, size); 3090 1.5 maxv if (ret == -1) 3091 1.5 maxv return -1; 3092 1.15 maxv segment_apply(&state->segs[seg], &gva); 3093 1.5 maxv } 3094 1.5 maxv 3095 1.6 maxv *gvap = gva; 3096 1.6 maxv return 0; 3097 1.6 maxv } 3098 1.6 maxv 3099 1.6 maxv static int 3100 1.37 maxv fetch_segment(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) 3101 1.8 maxv { 3102 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 3103 1.21 maxv uint8_t inst_bytes[5], byte; 3104 1.13 maxv size_t i, fetchsize; 3105 1.8 maxv gvaddr_t gva; 3106 1.8 maxv int ret, seg; 3107 1.8 maxv 3108 1.8 maxv fetchsize = sizeof(inst_bytes); 3109 1.8 maxv 3110 1.8 maxv gva = state->gprs[NVMM_X64_GPR_RIP]; 3111 1.15 maxv if (__predict_false(!is_long_mode(state))) { 3112 1.15 maxv ret = 
segment_check(&state->segs[NVMM_X64_SEG_CS], gva, 3113 1.8 maxv fetchsize); 3114 1.8 maxv if (ret == -1) 3115 1.8 maxv return -1; 3116 1.15 maxv segment_apply(&state->segs[NVMM_X64_SEG_CS], &gva); 3117 1.8 maxv } 3118 1.8 maxv 3119 1.37 maxv ret = read_guest_memory(mach, vcpu, gva, inst_bytes, fetchsize); 3120 1.8 maxv if (ret == -1) 3121 1.8 maxv return -1; 3122 1.8 maxv 3123 1.8 maxv seg = NVMM_X64_SEG_DS; 3124 1.13 maxv for (i = 0; i < fetchsize; i++) { 3125 1.13 maxv byte = inst_bytes[i]; 3126 1.13 maxv 3127 1.13 maxv if (byte == LEG_OVR_DS) { 3128 1.13 maxv seg = NVMM_X64_SEG_DS; 3129 1.13 maxv } else if (byte == LEG_OVR_ES) { 3130 1.13 maxv seg = NVMM_X64_SEG_ES; 3131 1.13 maxv } else if (byte == LEG_OVR_GS) { 3132 1.13 maxv seg = NVMM_X64_SEG_GS; 3133 1.13 maxv } else if (byte == LEG_OVR_FS) { 3134 1.13 maxv seg = NVMM_X64_SEG_FS; 3135 1.13 maxv } else if (byte == LEG_OVR_CS) { 3136 1.13 maxv seg = NVMM_X64_SEG_CS; 3137 1.13 maxv } else if (byte == LEG_OVR_SS) { 3138 1.13 maxv seg = NVMM_X64_SEG_SS; 3139 1.13 maxv } else if (byte == LEG_OPR_OVR) { 3140 1.13 maxv /* nothing */ 3141 1.13 maxv } else if (byte == LEG_ADR_OVR) { 3142 1.13 maxv /* nothing */ 3143 1.13 maxv } else if (byte == LEG_REP) { 3144 1.13 maxv /* nothing */ 3145 1.13 maxv } else if (byte == LEG_REPN) { 3146 1.13 maxv /* nothing */ 3147 1.13 maxv } else if (byte == LEG_LOCK) { 3148 1.13 maxv /* nothing */ 3149 1.13 maxv } else { 3150 1.13 maxv return seg; 3151 1.8 maxv } 3152 1.8 maxv } 3153 1.8 maxv 3154 1.8 maxv return seg; 3155 1.8 maxv } 3156 1.8 maxv 3157 1.8 maxv static int 3158 1.37 maxv fetch_instruction(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 3159 1.36 maxv struct nvmm_vcpu_exit *exit) 3160 1.5 maxv { 3161 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 3162 1.6 maxv size_t fetchsize; 3163 1.6 maxv gvaddr_t gva; 3164 1.5 maxv int ret; 3165 1.5 maxv 3166 1.5 maxv fetchsize = sizeof(exit->u.mem.inst_bytes); 3167 1.5 maxv 3168 1.5 maxv gva = 
state->gprs[NVMM_X64_GPR_RIP]; 3169 1.15 maxv if (__predict_false(!is_long_mode(state))) { 3170 1.15 maxv ret = segment_check(&state->segs[NVMM_X64_SEG_CS], gva, 3171 1.5 maxv fetchsize); 3172 1.5 maxv if (ret == -1) 3173 1.5 maxv return -1; 3174 1.15 maxv segment_apply(&state->segs[NVMM_X64_SEG_CS], &gva); 3175 1.5 maxv } 3176 1.5 maxv 3177 1.37 maxv ret = read_guest_memory(mach, vcpu, gva, exit->u.mem.inst_bytes, 3178 1.6 maxv fetchsize); 3179 1.6 maxv if (ret == -1) 3180 1.6 maxv return -1; 3181 1.6 maxv 3182 1.6 maxv exit->u.mem.inst_len = fetchsize; 3183 1.6 maxv 3184 1.6 maxv return 0; 3185 1.6 maxv } 3186 1.6 maxv 3187 1.6 maxv static int 3188 1.43 reinoud assist_mem_movs(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 3189 1.6 maxv struct x86_instr *instr) 3190 1.6 maxv { 3191 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 3192 1.43 reinoud uint64_t *gprs; 3193 1.6 maxv uint8_t data[8]; 3194 1.6 maxv gvaddr_t gva; 3195 1.6 maxv size_t size; 3196 1.6 maxv int ret; 3197 1.6 maxv 3198 1.6 maxv size = instr->operand_size; 3199 1.43 reinoud gprs = state->gprs; 3200 1.5 maxv 3201 1.6 maxv /* Source. */ 3202 1.6 maxv ret = store_to_gva(state, instr, &instr->src, &gva, size); 3203 1.5 maxv if (ret == -1) 3204 1.5 maxv return -1; 3205 1.37 maxv ret = read_guest_memory(mach, vcpu, gva, data, size); 3206 1.6 maxv if (ret == -1) 3207 1.5 maxv return -1; 3208 1.5 maxv 3209 1.6 maxv /* Destination. 
*/ 3210 1.6 maxv ret = store_to_gva(state, instr, &instr->dst, &gva, size); 3211 1.6 maxv if (ret == -1) 3212 1.6 maxv return -1; 3213 1.37 maxv ret = write_guest_memory(mach, vcpu, gva, data, size); 3214 1.5 maxv if (ret == -1) 3215 1.5 maxv return -1; 3216 1.5 maxv 3217 1.43 reinoud /* 3218 1.43 reinoud * Inlined x86_func_movs() call 3219 1.43 reinoud * (*instr->emul->func)(vcpu, &mem, state->gprs); 3220 1.43 reinoud */ 3221 1.43 reinoud 3222 1.43 reinoud if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) { 3223 1.43 reinoud gprs[NVMM_X64_GPR_RSI] -= size; 3224 1.43 reinoud gprs[NVMM_X64_GPR_RDI] -= size; 3225 1.43 reinoud } else { 3226 1.43 reinoud gprs[NVMM_X64_GPR_RSI] += size; 3227 1.43 reinoud gprs[NVMM_X64_GPR_RDI] += size; 3228 1.43 reinoud } 3229 1.43 reinoud 3230 1.43 reinoud return 0; 3231 1.43 reinoud } 3232 1.43 reinoud 3233 1.43 reinoud static int 3234 1.43 reinoud assist_mem_cmps(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 3235 1.43 reinoud struct x86_instr *instr) 3236 1.43 reinoud { 3237 1.43 reinoud struct nvmm_x64_state *state = vcpu->state; 3238 1.43 reinoud uint64_t *gprs, op1, op2, fl; 3239 1.43 reinoud uint8_t data1[8], data2[8]; 3240 1.43 reinoud gvaddr_t gva; 3241 1.43 reinoud size_t size; 3242 1.43 reinoud int ret; 3243 1.43 reinoud 3244 1.43 reinoud size = instr->operand_size; 3245 1.43 reinoud gprs = state->gprs; 3246 1.43 reinoud 3247 1.43 reinoud /* Source 1. */ 3248 1.43 reinoud ret = store_to_gva(state, instr, &instr->src, &gva, size); 3249 1.43 reinoud if (ret == -1) 3250 1.43 reinoud return -1; 3251 1.43 reinoud ret = read_guest_memory(mach, vcpu, gva, data1, size); 3252 1.43 reinoud if (ret == -1) 3253 1.43 reinoud return -1; 3254 1.43 reinoud 3255 1.43 reinoud /* Source 2. 
*/ 3256 1.43 reinoud ret = store_to_gva(state, instr, &instr->dst, &gva, size); 3257 1.43 reinoud if (ret == -1) 3258 1.43 reinoud return -1; 3259 1.43 reinoud ret = read_guest_memory(mach, vcpu, gva, data2, size); 3260 1.43 reinoud if (ret == -1) 3261 1.43 reinoud return -1; 3262 1.43 reinoud 3263 1.43 reinoud /* 3264 1.43 reinoud * Inlined x86_func_cmps() call 3265 1.43 reinoud * (*instr->emul->func)(vcpu, &mem, state->gprs); 3266 1.43 reinoud */ 3267 1.43 reinoud 3268 1.43 reinoud /* Perform the CMP. */ 3269 1.43 reinoud op1 = *((uint64_t *) data1); 3270 1.43 reinoud op2 = *((uint64_t *) data2); 3271 1.43 reinoud exec_sub(op1, op2, &fl, size); 3272 1.43 reinoud 3273 1.43 reinoud gprs[NVMM_X64_GPR_RFLAGS] &= ~PSL_SUB_MASK; 3274 1.43 reinoud gprs[NVMM_X64_GPR_RFLAGS] |= (fl & PSL_SUB_MASK); 3275 1.43 reinoud 3276 1.43 reinoud if (gprs[NVMM_X64_GPR_RFLAGS] & PSL_D) { 3277 1.43 reinoud gprs[NVMM_X64_GPR_RSI] -= size; 3278 1.43 reinoud gprs[NVMM_X64_GPR_RDI] -= size; 3279 1.43 reinoud } else { 3280 1.43 reinoud gprs[NVMM_X64_GPR_RSI] += size; 3281 1.43 reinoud gprs[NVMM_X64_GPR_RDI] += size; 3282 1.43 reinoud } 3283 1.5 maxv 3284 1.5 maxv return 0; 3285 1.5 maxv } 3286 1.5 maxv 3287 1.6 maxv static int 3288 1.37 maxv assist_mem_single(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu, 3289 1.37 maxv struct x86_instr *instr) 3290 1.5 maxv { 3291 1.37 maxv struct nvmm_x64_state *state = vcpu->state; 3292 1.37 maxv struct nvmm_vcpu_exit *exit = vcpu->exit; 3293 1.5 maxv struct nvmm_mem mem; 3294 1.10 maxv uint8_t membuf[8]; 3295 1.5 maxv uint64_t val; 3296 1.5 maxv 3297 1.11 maxv memset(membuf, 0, sizeof(membuf)); 3298 1.12 maxv 3299 1.37 maxv mem.mach = mach; 3300 1.37 maxv mem.vcpu = vcpu; 3301 1.12 maxv mem.gpa = exit->u.mem.gpa; 3302 1.12 maxv mem.size = instr->operand_size; 3303 1.10 maxv mem.data = membuf; 3304 1.5 maxv 3305 1.12 maxv /* Determine the direction. 
*/ 3306 1.6 maxv switch (instr->src.type) { 3307 1.5 maxv case STORE_REG: 3308 1.6 maxv if (instr->src.disp.type != DISP_NONE) { 3309 1.5 maxv /* Indirect access. */ 3310 1.5 maxv mem.write = false; 3311 1.5 maxv } else { 3312 1.5 maxv /* Direct access. */ 3313 1.5 maxv mem.write = true; 3314 1.5 maxv } 3315 1.5 maxv break; 3316 1.32 maxv case STORE_DUALREG: 3317 1.32 maxv if (instr->src.disp.type == DISP_NONE) { 3318 1.32 maxv DISASSEMBLER_BUG(); 3319 1.32 maxv } 3320 1.32 maxv mem.write = false; 3321 1.32 maxv break; 3322 1.5 maxv case STORE_IMM: 3323 1.5 maxv mem.write = true; 3324 1.5 maxv break; 3325 1.5 maxv case STORE_SIB: 3326 1.5 maxv mem.write = false; 3327 1.5 maxv break; 3328 1.5 maxv case STORE_DMO: 3329 1.5 maxv mem.write = false; 3330 1.5 maxv break; 3331 1.5 maxv default: 3332 1.12 maxv DISASSEMBLER_BUG(); 3333 1.5 maxv } 3334 1.5 maxv 3335 1.12 maxv if (mem.write) { 3336 1.12 maxv switch (instr->src.type) { 3337 1.12 maxv case STORE_REG: 3338 1.33 maxv /* The instruction was "reg -> mem". Fetch the register 3339 1.33 maxv * in membuf. */ 3340 1.33 maxv if (__predict_false(instr->src.disp.type != DISP_NONE)) { 3341 1.5 maxv DISASSEMBLER_BUG(); 3342 1.5 maxv } 3343 1.12 maxv val = state->gprs[instr->src.u.reg->num]; 3344 1.12 maxv val = __SHIFTOUT(val, instr->src.u.reg->mask); 3345 1.12 maxv memcpy(mem.data, &val, mem.size); 3346 1.12 maxv break; 3347 1.12 maxv case STORE_IMM: 3348 1.33 maxv /* The instruction was "imm -> mem". Fetch the immediate 3349 1.33 maxv * in membuf. */ 3350 1.12 maxv memcpy(mem.data, &instr->src.u.imm.data, mem.size); 3351 1.12 maxv break; 3352 1.12 maxv default: 3353 1.5 maxv DISASSEMBLER_BUG(); 3354 1.5 maxv } 3355 1.33 maxv } else if (instr->emul->readreg) { 3356 1.33 maxv /* The instruction was "mem -> reg", but the value of the 3357 1.33 maxv * register matters for the emul func. Fetch it in membuf. 
*/ 3358 1.33 maxv if (__predict_false(instr->dst.type != STORE_REG)) { 3359 1.19 maxv DISASSEMBLER_BUG(); 3360 1.19 maxv } 3361 1.33 maxv if (__predict_false(instr->dst.disp.type != DISP_NONE)) { 3362 1.19 maxv DISASSEMBLER_BUG(); 3363 1.19 maxv } 3364 1.19 maxv val = state->gprs[instr->dst.u.reg->num]; 3365 1.19 maxv val = __SHIFTOUT(val, instr->dst.u.reg->mask); 3366 1.19 maxv memcpy(mem.data, &val, mem.size); 3367 1.5 maxv } 3368 1.5 maxv 3369 1.37 maxv (*instr->emul->func)(vcpu, &mem, state->gprs); 3370 1.5 maxv 3371 1.33 maxv if (instr->emul->notouch) { 3372 1.33 maxv /* We're done. */ 3373 1.33 maxv return 0; 3374 1.33 maxv } 3375 1.33 maxv 3376 1.33 maxv if (!mem.write) { 3377 1.33 maxv /* The instruction was "mem -> reg". The emul func has filled 3378 1.33 maxv * membuf with the memory content. Install membuf in the 3379 1.33 maxv * register. */ 3380 1.33 maxv if (__predict_false(instr->dst.type != STORE_REG)) { 3381 1.33 maxv DISASSEMBLER_BUG(); 3382 1.33 maxv } 3383 1.33 maxv if (__predict_false(instr->dst.disp.type != DISP_NONE)) { 3384 1.12 maxv DISASSEMBLER_BUG(); 3385 1.12 maxv } 3386 1.19 maxv memcpy(&val, membuf, sizeof(uint64_t)); 3387 1.6 maxv val = __SHIFTIN(val, instr->dst.u.reg->mask); 3388 1.6 maxv state->gprs[instr->dst.u.reg->num] &= ~instr->dst.u.reg->mask; 3389 1.6 maxv state->gprs[instr->dst.u.reg->num] |= val; 3390 1.10 maxv state->gprs[instr->dst.u.reg->num] &= ~instr->zeroextend_mask; 3391 1.33 maxv } else if (instr->emul->backprop) { 3392 1.33 maxv /* The instruction was "reg -> mem", but the memory must be 3393 1.33 maxv * back-propagated to the register. Install membuf in the 3394 1.33 maxv * register. 
*/ 3395 1.33 maxv if (__predict_false(instr->src.type != STORE_REG)) { 3396 1.33 maxv DISASSEMBLER_BUG(); 3397 1.33 maxv } 3398 1.33 maxv if (__predict_false(instr->src.disp.type != DISP_NONE)) { 3399 1.33 maxv DISASSEMBLER_BUG(); 3400 1.33 maxv } 3401 1.33 maxv memcpy(&val, membuf, sizeof(uint64_t)); 3402 1.33 maxv val = __SHIFTIN(val, instr->src.u.reg->mask); 3403 1.33 maxv state->gprs[instr->src.u.reg->num] &= ~instr->src.u.reg->mask; 3404 1.33 maxv state->gprs[instr->src.u.reg->num] |= val; 3405 1.33 maxv state->gprs[instr->src.u.reg->num] &= ~instr->zeroextend_mask; 3406 1.6 maxv } 3407 1.6 maxv 3408 1.6 maxv return 0; 3409 1.6 maxv } 3410 1.6 maxv 3411 1.6 maxv int 3412 1.31 maxv nvmm_assist_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu) 3413 1.6 maxv { 3414 1.31 maxv struct nvmm_x64_state *state = vcpu->state; 3415 1.36 maxv struct nvmm_vcpu_exit *exit = vcpu->exit; 3416 1.6 maxv struct x86_instr instr; 3417 1.15 maxv uint64_t cnt = 0; /* GCC */ 3418 1.6 maxv int ret; 3419 1.6 maxv 3420 1.36 maxv if (__predict_false(exit->reason != NVMM_VCPU_EXIT_MEMORY)) { 3421 1.6 maxv errno = EINVAL; 3422 1.6 maxv return -1; 3423 1.6 maxv } 3424 1.6 maxv 3425 1.31 maxv ret = nvmm_vcpu_getstate(mach, vcpu, 3426 1.15 maxv NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 3427 1.15 maxv NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 3428 1.6 maxv if (ret == -1) 3429 1.6 maxv return -1; 3430 1.6 maxv 3431 1.6 maxv if (exit->u.mem.inst_len == 0) { 3432 1.6 maxv /* 3433 1.6 maxv * The instruction was not fetched from the kernel. Fetch 3434 1.6 maxv * it ourselves. 
3435 1.6 maxv */ 3436 1.37 maxv ret = fetch_instruction(mach, vcpu, exit); 3437 1.6 maxv if (ret == -1) 3438 1.6 maxv return -1; 3439 1.6 maxv } 3440 1.6 maxv 3441 1.6 maxv ret = x86_decode(exit->u.mem.inst_bytes, exit->u.mem.inst_len, 3442 1.31 maxv &instr, state); 3443 1.6 maxv if (ret == -1) { 3444 1.6 maxv errno = ENODEV; 3445 1.6 maxv return -1; 3446 1.6 maxv } 3447 1.6 maxv 3448 1.15 maxv if (instr.legpref.rep || instr.legpref.repn) { 3449 1.31 maxv cnt = rep_get_cnt(state, instr.address_size); 3450 1.15 maxv if (__predict_false(cnt == 0)) { 3451 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] += instr.len; 3452 1.15 maxv goto out; 3453 1.15 maxv } 3454 1.15 maxv } 3455 1.15 maxv 3456 1.6 maxv if (instr.opcode->movs) { 3457 1.43 reinoud ret = assist_mem_movs(mach, vcpu, &instr); 3458 1.43 reinoud } else if (instr.opcode->cmps) { 3459 1.43 reinoud instr.legpref.repe = !instr.legpref.repn; 3460 1.43 reinoud ret = assist_mem_cmps(mach, vcpu, &instr); 3461 1.6 maxv } else { 3462 1.37 maxv ret = assist_mem_single(mach, vcpu, &instr); 3463 1.6 maxv } 3464 1.6 maxv if (ret == -1) { 3465 1.6 maxv errno = ENODEV; 3466 1.6 maxv return -1; 3467 1.5 maxv } 3468 1.5 maxv 3469 1.14 maxv if (instr.legpref.rep || instr.legpref.repn) { 3470 1.15 maxv cnt -= 1; 3471 1.31 maxv rep_set_cnt(state, instr.address_size, cnt); 3472 1.6 maxv if (cnt == 0) { 3473 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] += instr.len; 3474 1.14 maxv } else if (__predict_false(instr.legpref.repn)) { 3475 1.43 reinoud /* repn */ 3476 1.31 maxv if (state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) { 3477 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] += instr.len; 3478 1.14 maxv } 3479 1.43 reinoud } else if (__predict_false(instr.legpref.repe)) { 3480 1.43 reinoud /* repe */ 3481 1.43 reinoud if ((state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) == 0) { 3482 1.43 reinoud state->gprs[NVMM_X64_GPR_RIP] += instr.len; 3483 1.43 reinoud } 3484 1.5 maxv } 3485 1.5 maxv } else { 3486 1.31 maxv state->gprs[NVMM_X64_GPR_RIP] += instr.len; 3487 
1.5 maxv } 3488 1.5 maxv 3489 1.15 maxv out: 3490 1.31 maxv ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS); 3491 1.5 maxv if (ret == -1) 3492 1.5 maxv return -1; 3493 1.5 maxv 3494 1.5 maxv return 0; 3495 1.1 maxv } 3496