/* $NetBSD: tprof_armv8.c,v 1.13 2021/12/03 10:54:19 skrll Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof_armv8.c,v 1.13 2021/12/03 10:54:19 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <dev/tprof/tprof.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>

#include <dev/tprof/tprof_armv8.h>

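/*
 * Profiling state.  A single event counter (counter 0) is used on every
 * CPU: counter_val is the approximate number of events per sample and
 * counter_reset_val is the value the counter is reloaded with so that it
 * overflows after roughly counter_val increments.
 */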
static tprof_param_t armv8_pmu_param;
static const u_int armv8_pmu_counter = 0;
static uint32_t counter_val;
static uint32_t counter_reset_val;

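/*
 * Check whether a common architectural event number is implemented by
 * this PMU, using the PMCEID0_EL0/PMCEID1_EL0 identification registers.
 */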
static bool
armv8_pmu_event_implemented(uint16_t event)
{
	uint64_t eid[2];

	if (event >= 64)
		return false;

	eid[0] = reg_pmceid0_el0_read();
	eid[1] = reg_pmceid1_el0_read();

	/* The low 32bits of PMCEID[01]_EL0 contain the common events 0 to n */
	const u_int idx = event / 32;
	const u_int bit = event % 32;

	if (eid[idx] & __BIT(bit))
		return true;

	return false;
}

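/*
 * PMXEVTYPER_EL0/PMXEVCNTR_EL0 access the event counter selected by
 * PMSELR_EL0; the isb() ensures the new selection is visible before the
 * indirect register is written.
 */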
static void
armv8_pmu_set_pmevtyper(u_int counter, uint64_t val)
{
	reg_pmselr_el0_write(counter);
	isb();
	reg_pmxevtyper_el0_write(val);
}

static void
armv8_pmu_set_pmevcntr(u_int counter, uint32_t val)
{
	reg_pmselr_el0_write(counter);
	isb();
	reg_pmxevcntr_el0_write(val);
}

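/*
 * Per-CPU start routine, run on each CPU via xc_broadcast() from
 * armv8_pmu_start(): program the event type, clear any pending overflow,
 * load the initial counter value and enable the counter and the PMU.
 */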
static void
armv8_pmu_start_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);
	uint64_t pmevtyper;

	/* Disable event counter */
	reg_pmcntenclr_el0_write(counter_mask);

	/* Configure event counter */
	pmevtyper = __SHIFTIN(armv8_pmu_param.p_event, PMEVTYPER_EVTCOUNT);
	if (!ISSET(armv8_pmu_param.p_flags, TPROF_PARAM_USER))
		pmevtyper |= PMEVTYPER_U;
	if (!ISSET(armv8_pmu_param.p_flags, TPROF_PARAM_KERN))
		pmevtyper |= PMEVTYPER_P;

	armv8_pmu_set_pmevtyper(armv8_pmu_counter, pmevtyper);

	/* Enable overflow interrupts */
	reg_pmintenset_el1_write(counter_mask);

	/* Clear overflow flag */
	reg_pmovsclr_el0_write(counter_mask);

	/* Initialize event counter value */
	armv8_pmu_set_pmevcntr(armv8_pmu_counter, counter_reset_val);

	/* Enable event counter */
	reg_pmcntenset_el0_write(counter_mask);
	reg_pmcr_el0_write(PMCR_E);
}

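/*
 * Per-CPU stop routine: disable the overflow interrupt and the event
 * counter, then clear PMCR_EL0 to stop the PMU.
 */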
static void
armv8_pmu_stop_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);

	/* Disable overflow interrupts */
	reg_pmintenclr_el1_write(counter_mask);

	/* Disable event counter */
	reg_pmcntenclr_el0_write(counter_mask);
	reg_pmcr_el0_write(0);
}

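/*
 * Aim for roughly 10000 samples per second.  counter_val is derived from
 * the CPU frequency reported in cpu_cc_freq; if that is not known the
 * fallback assumes a 4 GHz clock.
 */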
static uint64_t
armv8_pmu_estimate_freq(void)
{
	uint64_t cpufreq = curcpu()->ci_data.cpu_cc_freq;
	uint64_t freq = 10000;

	counter_val = cpufreq / freq;
	if (counter_val == 0)
		counter_val = 4000000000ULL / freq;

	return freq;
}

static uint32_t
armv8_pmu_ident(void)
{
	return TPROF_IDENT_ARMV8_GENERIC;
}

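/*
 * Validate that this PMU has event counters and implements the requested
 * event, then start profiling on all CPUs.
 */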
150 1.1 jmcneill static int
151 1.1 jmcneill armv8_pmu_start(const tprof_param_t *param)
152 1.1 jmcneill {
153 1.9 skrll /* PMCR.N of 0 means that no event counters are available */
154 1.9 skrll if (__SHIFTOUT(reg_pmcr_el0_read(), PMCR_N) == 0) {
155 1.9 skrll return EINVAL;
156 1.9 skrll }
157 1.1 jmcneill
158 1.1 jmcneill if (!armv8_pmu_event_implemented(param->p_event)) {
159 1.4 christos printf("%s: event %#" PRIx64 " not implemented on this CPU\n",
160 1.1 jmcneill __func__, param->p_event);
161 1.1 jmcneill return EINVAL;
162 1.1 jmcneill }
163 1.1 jmcneill
164 1.1 jmcneill counter_reset_val = -counter_val + 1;
165 1.1 jmcneill
166 1.1 jmcneill armv8_pmu_param = *param;
167 1.10 christos uint64_t xc = xc_broadcast(0, armv8_pmu_start_cpu, NULL, NULL);
168 1.1 jmcneill xc_wait(xc);
169 1.1 jmcneill
170 1.1 jmcneill return 0;
171 1.1 jmcneill }
172 1.1 jmcneill
static void
armv8_pmu_stop(const tprof_param_t *param)
{
	uint64_t xc;

	xc = xc_broadcast(0, armv8_pmu_stop_cpu, NULL, NULL);
	xc_wait(xc);
}

static const tprof_backend_ops_t tprof_armv8_pmu_ops = {
	.tbo_estimate_freq = armv8_pmu_estimate_freq,
	.tbo_ident = armv8_pmu_ident,
	.tbo_start = armv8_pmu_start,
	.tbo_stop = armv8_pmu_stop,
};

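/*
 * PMU overflow interrupt handler.  If our event counter overflowed,
 * record a sample for the interrupted PC and reload the counter, then
 * clear the pending overflow flags.
 */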
int
armv8_pmu_intr(void *priv)
{
	const struct trapframe * const tf = priv;
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);
	tprof_frame_info_t tfi;

	const uint32_t pmovs = reg_pmovsset_el0_read();
	if ((pmovs & counter_mask) != 0) {
		tfi.tfi_pc = tf->tf_pc;
		tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS &&
		    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
		tprof_sample(NULL, &tfi);

		armv8_pmu_set_pmevcntr(armv8_pmu_counter, counter_reset_val);
	}
	reg_pmovsclr_el0_write(pmovs);

	return 1;
}

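/*
 * Per-CPU initialization: put the PMU into a known state with EL0 access,
 * overflow interrupts and event counters all disabled.
 */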
static void
armv8_pmu_init_cpu(void *arg1, void *arg2)
{
	/* Disable EL0 access to performance monitors */
	reg_pmuserenr_el0_write(0);

	/* Disable interrupts */
	reg_pmintenclr_el1_write(~0U);

	/* Disable event counters */
	reg_pmcntenclr_el0_write(PMCNTEN_P);
}

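/*
 * Initialize the PMU on all CPUs and register this driver as a tprof
 * backend.
 */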
int
armv8_pmu_init(void)
{
	uint64_t xc = xc_broadcast(0, armv8_pmu_init_cpu, NULL, NULL);
	xc_wait(xc);

	return tprof_backend_register("tprof_armv8", &tprof_armv8_pmu_ops,
	    TPROF_BACKEND_VERSION);
}