/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
 */

#ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
#define __XEN_PUBLIC_ARCH_X86_PMU_H__

/* x86-specific PMU definitions */

/* AMD PMU registers and structures */
struct xen_pmu_amd_ctxt {
    /*
     * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
     * For PV(H) guests these fields are RO.
     */
    uint32_t counters;
    uint32_t ctrls;

    /* Counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    uint64_t regs[];
#elif defined(__GNUC__)
    uint64_t regs[0];
#endif
};
typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
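
/*
 * Illustrative sketch, not part of the canonical interface: the counter and
 * control MSR banks live past the fixed part of the context, and the RO
 * 'counters'/'ctrls' fields above give their byte offsets relative to the
 * start of the context (i.e. xen_pmu_arch.c.amd).  The helper below is
 * hypothetical and only demonstrates the intended address arithmetic; it
 * assumes a C99/GNU toolchain that accepts static inline.
 */
static inline uint64_t *
xen_pmu_amd_counter_bank(struct xen_pmu_amd_ctxt *ctxt)
{
    /* Resolve the byte offset against the context's own base address. */
    return (uint64_t *)((uint8_t *)ctxt + ctxt->counters);
}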

/* Intel PMU registers and structures */
struct xen_pmu_cntr_pair {
    uint64_t counter;
    uint64_t control;
};
typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);

struct xen_pmu_intel_ctxt {
    /*
     * Offsets to fixed and architectural counter MSRs (relative to
     * xen_pmu_arch.c.intel).
     * For PV(H) guests these fields are RO.
     */
    uint32_t fixed_counters;
    uint32_t arch_counters;

    /* PMU registers */
    uint64_t global_ctrl;
    uint64_t global_ovf_ctrl;
    uint64_t global_status;
    uint64_t fixed_ctrl;
    uint64_t ds_area;
    uint64_t pebs_enable;
    uint64_t debugctl;

    /* Fixed and architectural counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    uint64_t regs[];
#elif defined(__GNUC__)
    uint64_t regs[0];
#endif
};
typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);

/* Sampled domain's registers */
struct xen_pmu_regs {
    uint64_t ip;
    uint64_t sp;
    uint64_t flags;
    uint16_t cs;
    uint16_t ss;
    uint8_t cpl;
    uint8_t pad[3];
};
typedef struct xen_pmu_regs xen_pmu_regs_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);

/* PMU flags */
#define PMU_CACHED         (1<<0) /* PMU MSRs are cached in the context */
#define PMU_SAMPLE_USER    (1<<1) /* Sample was taken in user mode */
#define PMU_SAMPLE_REAL    (1<<2) /* Sample was taken in real mode */
#define PMU_SAMPLE_PV      (1<<3) /* Sample is from a PV guest */
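
/*
 * Illustrative sketch, not part of the canonical interface: one way a guest
 * might test the flags word it finds in xen_pmu_arch.pmu_flags (defined
 * below) when handling a PMU interrupt.  Both helper names are hypothetical;
 * a C99/GNU toolchain is assumed for static inline.
 */
static inline int xen_pmu_sample_is_user(uint64_t pmu_flags)
{
    /* Set by the hypervisor when the sample was taken in user mode. */
    return (pmu_flags & PMU_SAMPLE_USER) != 0;
}

static inline int xen_pmu_ctxt_is_cached(uint64_t pmu_flags)
{
    /*
     * While PMU_CACHED is set, the PMU MSR state is held in the shared
     * context rather than in hardware; per the description below, this is
     * the only window in which the guest should update the RW fields.
     */
    return (pmu_flags & PMU_CACHED) != 0;
}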

/*
 * Architecture-specific information describing the state of the processor at
 * the time of the PMU interrupt.
 * Fields of this structure marked as RW for the guest should only be written
 * by the guest while the PMU_CACHED bit in pmu_flags is set (the hypervisor
 * sets it when delivering a PMU interrupt).  The hypervisor reads the updated
 * data during the XENPMU_flush hypercall and then clears PMU_CACHED.
 */
struct xen_pmu_arch {
    union {
        /*
         * Processor's registers at the time of interrupt.
         * WO for hypervisor, RO for guests.
         */
        struct xen_pmu_regs regs;
        /* Padding for adding new registers to xen_pmu_regs in the future */
#define XENPMU_REGS_PAD_SZ  64
        uint8_t pad[XENPMU_REGS_PAD_SZ];
    } r;

    /* WO for hypervisor, RO for guest */
    uint64_t pmu_flags;

    /*
     * APIC LVTPC register.
     * RW for both hypervisor and guest.
     * Only the APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
     * during XENPMU_flush or XENPMU_lvtpc_set.
     */
    union {
        uint32_t lapic_lvtpc;
        uint64_t pad;
    } l;

    /*
     * Vendor-specific PMU registers.
     * RW for both hypervisor and guest (see exceptions above).
     * The guest's updates to this field are verified and then loaded into
     * hardware by the hypervisor during XENPMU_flush.
     */
    union {
        struct xen_pmu_amd_ctxt amd;
        struct xen_pmu_intel_ctxt intel;

        /*
         * Padding for contexts (fixed parts only; does not include the MSR
         * banks that are specified by offsets).
         */
#define XENPMU_CTXT_PAD_SZ  128
        uint8_t pad[XENPMU_CTXT_PAD_SZ];
    } c;
};
typedef struct xen_pmu_arch xen_pmu_arch_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);

#endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
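
/*
 * Usage sketch (illustrative only; any name not defined above, such as
 * xenpmu_shared, is hypothetical).  A guest PMU interrupt handler built on
 * this interface typically proceeds roughly as follows, with xenpmu_shared
 * standing in for the per-vCPU xen_pmu_arch area shared with the hypervisor:
 *
 *     struct xen_pmu_arch *pa = xenpmu_shared;
 *
 *     if (pa->pmu_flags & PMU_CACHED) {
 *         uint64_t sampled_ip = pa->r.regs.ip;   // sampled domain's RIP
 *         if (pa->pmu_flags & PMU_SAMPLE_USER)
 *             ;                                  // attribute to user mode
 *         // ... record the sample, then update the RW fields (l.lapic_lvtpc
 *         // and the vendor context in c) while PMU_CACHED is still set ...
 *     }
 *     // Finally ask the hypervisor to consume the updates and clear
 *     // PMU_CACHED, conventionally via the XENPMU_flush operation declared
 *     // in the common public pmu.h.
 *
 * The exact hypercall plumbing is deliberately omitted here.
 */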