/******************************************************************************
 * vm_event.h
 *
 * Memory event common structures.
 *
 * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _XEN_PUBLIC_VM_EVENT_H
#define _XEN_PUBLIC_VM_EVENT_H

#include "xen.h"

#define VM_EVENT_INTERFACE_VERSION 0x00000003

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#include "io/ring.h"

/*
 * Memory event flags
 */

/*
 * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
 * paused
 * VCPU_PAUSED in a response signals to unpause the vCPU
 */
#define VM_EVENT_FLAG_VCPU_PAUSED        (1 << 0)
/* Flags to aid debugging vm_event */
#define VM_EVENT_FLAG_FOREIGN            (1 << 1)
/*
 * The following flags can be set in response to a mem_access event.
 *
 * Emulate the fault-causing instruction (if set in the event response flags).
 * This will allow the guest to continue execution without lifting the page
 * access restrictions.
 */
#define VM_EVENT_FLAG_EMULATE            (1 << 2)
/*
 * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
 * potentially having side effects (like memory mapped or port I/O) disabled.
 */
#define VM_EVENT_FLAG_EMULATE_NOWRITE    (1 << 3)
/*
 * Toggle singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP  (1 << 4)
/*
 * Data is being sent back to the hypervisor in the event response, to be
 * returned by the read function when emulating an instruction.
 * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
 * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
 * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
 * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
 * Deny completion of the operation that triggered the event.
 * Currently only useful for MSR and control-register write events.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_DENY               (1 << 6)
/*
 * This flag can be set in a request or a response.
 *
 * On a request, indicates that the event occurred in the alternate p2m
 * specified by the altp2m_idx request field.
 *
 * On a response, indicates that the VCPU should resume in the alternate p2m
 * specified by the altp2m_idx response field if possible.
 */
#define VM_EVENT_FLAG_ALTERNATE_P2M      (1 << 7)
/*
 * Set the vCPU registers to the values in the vm_event response.
 * At the moment x86-only, applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
 * EFLAGS, and EIP.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
/*
 * Instruction cache is being sent back to the hypervisor in the event response
 * to be used by the emulator. This flag is only useful when combined with
 * VM_EVENT_FLAG_EMULATE and does not take precedence if combined with
 * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA (i.e.
 * if any of those flags are set, only those will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
/*
 * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
 * interrupt pending after resuming the VCPU.
 */
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
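/*
 * Illustrative sketch, not part of the ABI: a monitor application answering
 * a synchronous mem_access event could combine the response flags above to
 * unpause the reporting vCPU and have the faulting instruction emulated.
 * `rsp' is a hypothetical vm_event_response_t about to be placed on the
 * ring:
 *
 *     rsp.flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE;
 */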
/*
 * Reasons for the vm event request
 */

/* Default case */
#define VM_EVENT_REASON_UNKNOWN                 0
/* Memory access violation */
#define VM_EVENT_REASON_MEM_ACCESS              1
/* Memory sharing event */
#define VM_EVENT_REASON_MEM_SHARING             2
/* Memory paging event */
#define VM_EVENT_REASON_MEM_PAGING              3
/* A control register was updated */
#define VM_EVENT_REASON_WRITE_CTRLREG           4
/* An MSR was updated. */
#define VM_EVENT_REASON_MOV_TO_MSR              5
/* Debug operation executed (e.g. int3) */
#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT     6
/* Single-step (e.g. MTF) */
#define VM_EVENT_REASON_SINGLESTEP              7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST           8
/* A debug exception was caught */
#define VM_EVENT_REASON_DEBUG_EXCEPTION         9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID                   10
/*
 * Privileged call executed (e.g. SMC).
 * Note: event may be generated even if SMC condition check fails on some CPUs.
 * As this behavior is CPU-specific, users are advised to not rely on it.
 * These kinds of events will be filtered out in future versions.
 */
#define VM_EVENT_REASON_PRIVILEGED_CALL         11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT               12
/* A descriptor table register was accessed. */
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS       13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED      14

/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0    0
#define VM_EVENT_X86_CR3    1
#define VM_EVENT_X86_CR4    2
#define VM_EVENT_X86_XCR0   3

/*
 * Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
 * so as to not fill the vm_event ring buffer too quickly.
 */
struct vm_event_regs_x86 {
    uint64_t rax;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbx;
    uint64_t rsp;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;
    uint64_t rflags;
    uint64_t dr7;
    uint64_t rip;
    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t msr_efer;
    uint64_t msr_star;
    uint64_t msr_lstar;
    uint64_t fs_base;
    uint64_t gs_base;
    uint32_t cs_arbytes;
    uint32_t _pad;
};

/*
 * Only the register 'pc' can be set on a vm_event response using the
 * VM_EVENT_FLAG_SET_REGISTERS flag.
 */
struct vm_event_regs_arm {
    uint64_t ttbr0;
    uint64_t ttbr1;
    uint64_t ttbcr;
    uint64_t pc;
    uint32_t cpsr;
    uint32_t _pad;
};
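/*
 * Illustrative sketch, not part of the ABI: an x86 monitor that wants to
 * skip over the instruction that triggered an event can fill the response's
 * register block and request that it be loaded via
 * VM_EVENT_FLAG_SET_REGISTERS.  `req' and `rsp' are hypothetical
 * request/response structures and `insn_len' is assumed to be known to the
 * monitor (e.g. from an event that reports insn_length):
 *
 *     rsp.data.regs.x86      = req.data.regs.x86;
 *     rsp.data.regs.x86.rip += insn_len;
 *     rsp.flags |= VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_SET_REGISTERS;
 */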
/*
 * mem_access flag definitions
 *
 * These flags are set only as part of a mem_event request.
 *
 * R/W/X: Defines the type of violation that has triggered the event
 *        Multiple types can be set in a single violation!
 * GLA_VALID: If the gla field holds a guest VA associated with the event
 * FAULT_WITH_GLA: If the violation was triggered by accessing gla
 * FAULT_IN_GPT: If the violation was triggered during translating gla
 */
#define MEM_ACCESS_R                (1 << 0)
#define MEM_ACCESS_W                (1 << 1)
#define MEM_ACCESS_X                (1 << 2)
#define MEM_ACCESS_RWX              (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW               (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX               (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX               (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID        (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA   (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT     (1 << 5)

struct vm_event_mem_access {
    uint64_t gfn;
    uint64_t offset;
    uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
    uint32_t flags; /* MEM_ACCESS_* */
    uint32_t _pad;
};

struct vm_event_write_ctrlreg {
    uint32_t index;
    uint32_t _pad;
    uint64_t new_value;
    uint64_t old_value;
};

struct vm_event_singlestep {
    uint64_t gfn;
};

struct vm_event_debug {
    uint64_t gfn;
    uint32_t insn_length;
    uint8_t type;        /* HVMOP_TRAP_* */
    uint8_t _pad[3];
};

struct vm_event_mov_to_msr {
    uint64_t msr;
    uint64_t new_value;
    uint64_t old_value;
};

#define VM_EVENT_DESC_IDTR           1
#define VM_EVENT_DESC_GDTR           2
#define VM_EVENT_DESC_LDTR           3
#define VM_EVENT_DESC_TR             4

struct vm_event_desc_access {
    union {
        struct {
            uint32_t instr_info;         /* VMX: VMCS Instruction-Information */
            uint32_t _pad1;
            uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
        } vmx;
        struct {
            uint64_t exitinfo;           /* SVM: VMCB EXITINFO */
            uint64_t _pad2;
        } svm;
    } arch;
    uint8_t descriptor;                  /* VM_EVENT_DESC_* */
    uint8_t is_write;
    uint8_t _pad[6];
};

struct vm_event_cpuid {
    uint32_t insn_length;
    uint32_t leaf;
    uint32_t subleaf;
    uint32_t _pad;
};

struct vm_event_interrupt_x86 {
    uint32_t vector;
    uint32_t type;
    uint32_t error_code;
    uint32_t _pad;
    uint64_t cr2;
};

#define MEM_PAGING_DROP_PAGE        (1 << 0)
#define MEM_PAGING_EVICT_FAIL       (1 << 1)

struct vm_event_paging {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t flags;
};

struct vm_event_sharing {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t _pad;
};
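/*
 * Illustrative sketch, not part of the ABI: decoding a mem_access request.
 * `req' is a hypothetical vm_event_request_t taken off the ring and
 * `report_write' a hypothetical helper; the gla field is only meaningful
 * when MEM_ACCESS_GLA_VALID is set, and the guest physical address below
 * assumes 4 KiB pages:
 *
 *     if ( req.reason == VM_EVENT_REASON_MEM_ACCESS &&
 *          (req.u.mem_access.flags & MEM_ACCESS_W) )
 *     {
 *         uint64_t gpa = (req.u.mem_access.gfn << 12) + req.u.mem_access.offset;
 *
 *         if ( req.u.mem_access.flags & MEM_ACCESS_GLA_VALID )
 *             report_write(req.u.mem_access.gla, gpa);
 *     }
 */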
struct vm_event_emul_read_data {
    uint32_t size;
    /* The struct is used in a union with vm_event_regs_x86. */
    uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};

struct vm_event_emul_insn_data {
    uint8_t data[16]; /* Has to be completely filled */
};

typedef struct vm_event_st {
    uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
    uint32_t flags;     /* VM_EVENT_FLAG_* */
    uint32_t reason;    /* VM_EVENT_REASON_* */
    uint32_t vcpu_id;
    uint16_t altp2m_idx; /* may be used during request and response */
    uint16_t _pad[3];

    union {
        struct vm_event_paging                mem_paging;
        struct vm_event_sharing               mem_sharing;
        struct vm_event_mem_access            mem_access;
        struct vm_event_write_ctrlreg         write_ctrlreg;
        struct vm_event_mov_to_msr            mov_to_msr;
        struct vm_event_desc_access           desc_access;
        struct vm_event_singlestep            singlestep;
        struct vm_event_debug                 software_breakpoint;
        struct vm_event_debug                 debug_exception;
        struct vm_event_cpuid                 cpuid;
        union {
            struct vm_event_interrupt_x86     x86;
        } interrupt;
    } u;

    union {
        union {
            struct vm_event_regs_x86 x86;
            struct vm_event_regs_arm arm;
        } regs;

        union {
            struct vm_event_emul_read_data read;
            struct vm_event_emul_insn_data insn;
        } emul;
    } data;
} vm_event_request_t, vm_event_response_t;

DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* _XEN_PUBLIC_VM_EVENT_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
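/*
 * Illustrative sketch, not part of the ABI: a toolstack-side consumer of the
 * ring defined by DEFINE_RING_TYPES above operates on the back ring.
 * `back_ring' is a hypothetical vm_event_back_ring_t already initialised
 * with BACK_RING_INIT against the mapped ring page:
 *
 *     vm_event_request_t  req;
 *     vm_event_response_t rsp;
 *     RING_IDX idx = back_ring.req_cons;
 *
 *     memcpy(&req, RING_GET_REQUEST(&back_ring, idx), sizeof(req));
 *     back_ring.req_cons = ++idx;
 *
 *     memset(&rsp, 0, sizeof(rsp));
 *     rsp.version = VM_EVENT_INTERFACE_VERSION;
 *     rsp.vcpu_id = req.vcpu_id;
 *     rsp.reason  = req.reason;
 *     rsp.flags   = req.flags & VM_EVENT_FLAG_VCPU_PAUSED;
 *
 *     memcpy(RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt),
 *            &rsp, sizeof(rsp));
 *     back_ring.rsp_prod_pvt++;
 *     RING_PUSH_RESPONSES(&back_ring);
 */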