/******************************************************************************
 * vm_event.h
 *
 * Memory event common structures.
 *
 * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _XEN_PUBLIC_VM_EVENT_H
#define _XEN_PUBLIC_VM_EVENT_H

#include "xen.h"

#define VM_EVENT_INTERFACE_VERSION 0x00000003

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#include "io/ring.h"

/*
 * Memory event flags
 */

/*
 * VCPU_PAUSED in a request signals that the vCPU triggering the event has
 * been paused.
 * VCPU_PAUSED in a response signals to unpause the vCPU.
 */
#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
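/*
 * Illustrative sketch, not part of the ABI: a monitor application that
 * receives a request with VM_EVENT_FLAG_VCPU_PAUSED set typically echoes the
 * flag in its response so that the vCPU is unpaused again.  The helper name
 * below is hypothetical.
 *
 *     static void fill_response_header(const vm_event_request_t *req,
 *                                      vm_event_response_t *rsp)
 *     {
 *         rsp->version = VM_EVENT_INTERFACE_VERSION;
 *         rsp->vcpu_id = req->vcpu_id;
 *         rsp->reason  = req->reason;
 *         rsp->flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;
 *     }
 */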
/* Flags to aid debugging vm_event */
#define VM_EVENT_FLAG_FOREIGN (1 << 1)
/*
 * The following flags can be set in response to a mem_access event.
 *
 * Emulate the fault-causing instruction (if set in the event response flags).
 * This will allow the guest to continue execution without lifting the page
 * access restrictions.
 */
#define VM_EVENT_FLAG_EMULATE (1 << 2)
/*
 * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
 * potentially having side effects (like memory mapped or port I/O) disabled.
 */
#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
/*
 * Toggle singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
/*
 * Data is being sent back to the hypervisor in the event response, to be
 * returned by the read function when emulating an instruction.
 * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
 * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
 * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
 * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
 * Deny completion of the operation that triggered the event.
 * Currently only useful for MSR and control-register write events.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_DENY (1 << 6)
/*
 * This flag can be set in a request or a response.
 *
 * On a request, indicates that the event occurred in the alternate p2m
 * specified by the altp2m_idx request field.
 *
 * On a response, indicates that the VCPU should resume in the alternate p2m
 * specified by the altp2m_idx response field if possible.
 */
#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
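/*
 * Illustrative sketch, not part of the ABI (assumes the monitor has already
 * created an alternate p2m view and knows its index, "view_idx" here):
 *
 *     rsp->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
 *     rsp->altp2m_idx = view_idx;
 */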
/*
 * Set the vCPU registers to the values in the vm_event response.
 * Currently x86-only; applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
 * EFLAGS, and EIP.
 * Requires the vCPU to be paused already (synchronous events only).
 */
#define VM_EVENT_FLAG_SET_REGISTERS (1 << 8)
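/*
 * Illustrative sketch, not part of the ABI (x86; assumes the request carried
 * VM_EVENT_FLAG_VCPU_PAUSED and that the faulting instruction is known to be
 * 3 bytes long): skip the instruction by advancing RIP in the response.
 *
 *     rsp->data.regs.x86      = req->data.regs.x86;
 *     rsp->data.regs.x86.rip += 3;
 *     rsp->flags |= VM_EVENT_FLAG_SET_REGISTERS | VM_EVENT_FLAG_VCPU_PAUSED;
 */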
/*
 * Instruction cache is being sent back to the hypervisor in the event response
 * to be used by the emulator. This flag is only useful when combined with
 * VM_EVENT_FLAG_EMULATE and does not take precedence if combined with
 * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA (i.e.
 * if any of those flags are set, only those will be honored).
 */
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
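/*
 * Illustrative sketch, not part of the ABI: respond to a mem_access event by
 * emulating the faulting instruction while feeding the emulator four zero
 * bytes for the read (what to return is entirely the monitor's choice; the
 * emul data union is defined towards the end of this header):
 *
 *     memset(&rsp->data.emul.read, 0, sizeof(rsp->data.emul.read));
 *     rsp->data.emul.read.size = 4;
 *     rsp->flags |= VM_EVENT_FLAG_EMULATE | VM_EVENT_FLAG_SET_EMUL_READ_DATA;
 */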
/*
 * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
 * interrupt pending after resuming the VCPU.
 */
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)

/*
 * Reasons for the vm event request.
 * (An illustrative dispatch sketch follows the list below.)
 */

/* Default case */
#define VM_EVENT_REASON_UNKNOWN 0
/* Memory access violation */
#define VM_EVENT_REASON_MEM_ACCESS 1
/* Memory sharing event */
#define VM_EVENT_REASON_MEM_SHARING 2
/* Memory paging event */
#define VM_EVENT_REASON_MEM_PAGING 3
/* A control register was updated */
#define VM_EVENT_REASON_WRITE_CTRLREG 4
/* An MSR was updated. */
#define VM_EVENT_REASON_MOV_TO_MSR 5
/* Debug operation executed (e.g. int3) */
#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT 6
/* Single-step (e.g. MTF) */
#define VM_EVENT_REASON_SINGLESTEP 7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST 8
/* A debug exception was caught */
#define VM_EVENT_REASON_DEBUG_EXCEPTION 9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID 10
/*
 * Privileged call executed (e.g. SMC).
 * Note: the event may be generated even if the SMC condition check fails on
 * some CPUs.  As this behavior is CPU-specific, users are advised not to
 * rely on it.  These kinds of events will be filtered out in future versions.
 */
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT 12
/* A descriptor table register was accessed. */
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS 13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED 14
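/*
 * Illustrative dispatch sketch, not part of the ABI (the handler names are
 * hypothetical):
 *
 *     switch ( req->reason )
 *     {
 *     case VM_EVENT_REASON_MEM_ACCESS:
 *         handle_mem_access(&req->u.mem_access, rsp);
 *         break;
 *     case VM_EVENT_REASON_WRITE_CTRLREG:
 *         handle_write_ctrlreg(&req->u.write_ctrlreg, rsp);
 *         break;
 *     case VM_EVENT_REASON_SINGLESTEP:
 *         handle_singlestep(&req->u.singlestep, rsp);
 *         break;
 *     default:
 *         break;
 *     }
 */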

/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
#define VM_EVENT_X86_CR3 1
#define VM_EVENT_X86_CR4 2
#define VM_EVENT_X86_XCR0 3

/*
 * Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
 * so as not to fill the vm_event ring buffer too quickly.
 */
struct vm_event_regs_x86 {
    uint64_t rax;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbx;
    uint64_t rsp;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;
    uint64_t rflags;
    uint64_t dr7;
    uint64_t rip;
    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t msr_efer;
    uint64_t msr_star;
    uint64_t msr_lstar;
    uint64_t fs_base;
    uint64_t gs_base;
    uint32_t cs_arbytes;
    uint32_t _pad;
};

/*
 * Only the register 'pc' can be set on a vm_event response using the
 * VM_EVENT_FLAG_SET_REGISTERS flag.
 */
struct vm_event_regs_arm {
    uint64_t ttbr0;
    uint64_t ttbr1;
    uint64_t ttbcr;
    uint64_t pc;
    uint32_t cpsr;
    uint32_t _pad;
};

/*
 * mem_access flag definitions
 *
 * These flags are set only as part of a vm_event request.
 *
 * R/W/X: Defines the type of violation that has triggered the event.
 *        Multiple types can be set in a single violation!
 * GLA_VALID: If the gla field holds a guest VA associated with the event.
 * FAULT_WITH_GLA: If the violation was triggered by accessing gla.
 * FAULT_IN_GPT: If the violation was triggered during translating gla.
 *
 * (An illustrative decoding sketch follows struct vm_event_mem_access below.)
 */
#define MEM_ACCESS_R (1 << 0)
#define MEM_ACCESS_W (1 << 1)
#define MEM_ACCESS_X (1 << 2)
#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)

struct vm_event_mem_access {
    uint64_t gfn;
    uint64_t offset;
    uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
    uint32_t flags; /* MEM_ACCESS_* */
    uint32_t _pad;
};
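
/*
 * Illustrative decoding sketch, not part of the ABI (log() stands in for
 * whatever reporting mechanism the monitor application uses):
 *
 *     const struct vm_event_mem_access *ma = &req->u.mem_access;
 *
 *     if ( ma->flags & MEM_ACCESS_W )
 *         log("write violation at gfn 0x%" PRIx64 " offset 0x%" PRIx64,
 *             ma->gfn, ma->offset);
 *     if ( (ma->flags & MEM_ACCESS_GLA_VALID) &&
 *          (ma->flags & MEM_ACCESS_FAULT_WITH_GLA) )
 *         log("  faulting guest linear address 0x%" PRIx64, ma->gla);
 */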

struct vm_event_write_ctrlreg {
    uint32_t index;
    uint32_t _pad;
    uint64_t new_value;
    uint64_t old_value;
};
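
/*
 * Illustrative sketch, not part of the ABI: deny an attempt to clear CR0.WP
 * (bit 16 of CR0).  Note that VM_EVENT_FLAG_DENY requires the vCPU to be
 * paused already.
 *
 *     if ( req->u.write_ctrlreg.index == VM_EVENT_X86_CR0 &&
 *          !(req->u.write_ctrlreg.new_value & (1ULL << 16)) )
 *         rsp->flags |= VM_EVENT_FLAG_DENY;
 */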

struct vm_event_singlestep {
    uint64_t gfn;
};

struct vm_event_debug {
    uint64_t gfn;
    uint32_t insn_length;
    uint8_t type;        /* HVMOP_TRAP_* */
    uint8_t _pad[3];
};

struct vm_event_mov_to_msr {
    uint64_t msr;
    uint64_t new_value;
    uint64_t old_value;
};

#define VM_EVENT_DESC_IDTR 1
#define VM_EVENT_DESC_GDTR 2
#define VM_EVENT_DESC_LDTR 3
#define VM_EVENT_DESC_TR 4

struct vm_event_desc_access {
    union {
        struct {
            uint32_t instr_info;         /* VMX: VMCS Instruction-Information */
            uint32_t _pad1;
            uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
        } vmx;
        struct {
            uint64_t exitinfo;           /* SVM: VMCB EXITINFO */
            uint64_t _pad2;
        } svm;
    } arch;
    uint8_t descriptor;                  /* VM_EVENT_DESC_* */
    uint8_t is_write;
    uint8_t _pad[6];
};

struct vm_event_cpuid {
    uint32_t insn_length;
    uint32_t leaf;
    uint32_t subleaf;
    uint32_t _pad;
};

struct vm_event_interrupt_x86 {
    uint32_t vector;
    uint32_t type;
    uint32_t error_code;
    uint32_t _pad;
    uint64_t cr2;
};
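
/*
 * Illustrative sketch, not part of the ABI: request a one-shot interrupt
 * notification on resume via VM_EVENT_FLAG_GET_NEXT_INTERRUPT, then inspect
 * the VM_EVENT_REASON_INTERRUPT request that is delivered for the next
 * pending interrupt (log() is a hypothetical stand-in):
 *
 *     rsp->flags |= VM_EVENT_FLAG_GET_NEXT_INTERRUPT;
 *
 *     ... and when the one-shot event subsequently arrives as a request:
 *
 *     if ( req->reason == VM_EVENT_REASON_INTERRUPT )
 *         log("vector 0x%x type 0x%x", req->u.interrupt.x86.vector,
 *             req->u.interrupt.x86.type);
 */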

#define MEM_PAGING_DROP_PAGE (1 << 0)
#define MEM_PAGING_EVICT_FAIL (1 << 1)

struct vm_event_paging {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t flags;
};

struct vm_event_sharing {
    uint64_t gfn;
    uint32_t p2mt;
    uint32_t _pad;
};

struct vm_event_emul_read_data {
    uint32_t size;
    /* The struct is used in a union with vm_event_regs_x86. */
    uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};

struct vm_event_emul_insn_data {
    uint8_t data[16]; /* Has to be completely filled */
};

typedef struct vm_event_st {
    uint32_t version;     /* VM_EVENT_INTERFACE_VERSION */
    uint32_t flags;       /* VM_EVENT_FLAG_* */
    uint32_t reason;      /* VM_EVENT_REASON_* */
    uint32_t vcpu_id;
    uint16_t altp2m_idx;  /* may be used during request and response */
    uint16_t _pad[3];

    union {
        struct vm_event_paging mem_paging;
        struct vm_event_sharing mem_sharing;
        struct vm_event_mem_access mem_access;
        struct vm_event_write_ctrlreg write_ctrlreg;
        struct vm_event_mov_to_msr mov_to_msr;
        struct vm_event_desc_access desc_access;
        struct vm_event_singlestep singlestep;
        struct vm_event_debug software_breakpoint;
        struct vm_event_debug debug_exception;
        struct vm_event_cpuid cpuid;
        union {
            struct vm_event_interrupt_x86 x86;
        } interrupt;
    } u;

    union {
        union {
            struct vm_event_regs_x86 x86;
            struct vm_event_regs_arm arm;
        } regs;

        union {
            struct vm_event_emul_read_data read;
            struct vm_event_emul_insn_data insn;
        } emul;
    } data;
} vm_event_request_t, vm_event_response_t;

DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
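
/*
 * Illustrative consumer-side ring sketch, not part of the ABI.  Assumptions:
 * "sring" points at the already-mapped shared ring page (e.g. set up through
 * the toolstack's monitor interface), "page_size" is the size of that page,
 * and event-channel notification plus error handling are omitted.
 *
 *     vm_event_back_ring_t back_ring;
 *
 *     SHARED_RING_INIT(sring);
 *     BACK_RING_INIT(&back_ring, sring, page_size);
 *
 *     while ( RING_HAS_UNCONSUMED_REQUESTS(&back_ring) )
 *     {
 *         vm_event_request_t req;
 *         vm_event_response_t rsp;
 *
 *         memcpy(&req, RING_GET_REQUEST(&back_ring, back_ring.req_cons),
 *                sizeof(req));
 *         back_ring.req_cons++;
 *
 *         memset(&rsp, 0, sizeof(rsp));
 *         rsp.version = VM_EVENT_INTERFACE_VERSION;
 *         rsp.vcpu_id = req.vcpu_id;
 *         rsp.reason = req.reason;
 *         rsp.flags = req.flags & VM_EVENT_FLAG_VCPU_PAUSED;
 *
 *         memcpy(RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt),
 *                &rsp, sizeof(rsp));
 *         back_ring.rsp_prod_pvt++;
 *         RING_PUSH_RESPONSES(&back_ring);
 *     }
 */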

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* _XEN_PUBLIC_VM_EVENT_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */