trap.c revision 1.48 1 1.48 riastrad /* $NetBSD: trap.c,v 1.48 2023/02/25 00:40:22 riastradh Exp $ */
2 1.1 matt
3 1.1 matt /*-
4 1.1 matt * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.1 matt * by Matt Thomas of 3am Software Foundry.
9 1.1 matt *
10 1.1 matt * Redistribution and use in source and binary forms, with or without
11 1.1 matt * modification, are permitted provided that the following conditions
12 1.1 matt * are met:
13 1.1 matt * 1. Redistributions of source code must retain the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer.
15 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 matt * notice, this list of conditions and the following disclaimer in the
17 1.1 matt * documentation and/or other materials provided with the distribution.
18 1.1 matt *
19 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
30 1.1 matt */
31 1.1 matt
32 1.1 matt #include <sys/cdefs.h>
33 1.1 matt
34 1.48 riastrad __KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.48 2023/02/25 00:40:22 riastradh Exp $");
35 1.1 matt
36 1.4 ryo #include "opt_arm_intr_impl.h"
37 1.4 ryo #include "opt_compat_netbsd32.h"
38 1.22 jmcneill #include "opt_dtrace.h"
39 1.4 ryo
40 1.1 matt #include <sys/param.h>
41 1.8 ryo #include <sys/kauth.h>
42 1.1 matt #include <sys/types.h>
43 1.4 ryo #include <sys/atomic.h>
44 1.1 matt #include <sys/cpu.h>
45 1.28 ryo #include <sys/evcnt.h>
46 1.4 ryo #ifdef KDB
47 1.4 ryo #include <sys/kdb.h>
48 1.4 ryo #endif
49 1.3 nisimura #include <sys/proc.h>
50 1.3 nisimura #include <sys/systm.h>
51 1.3 nisimura #include <sys/signal.h>
52 1.3 nisimura #include <sys/signalvar.h>
53 1.3 nisimura #include <sys/siginfo.h>
54 1.28 ryo #include <sys/xcall.h>
55 1.1 matt
56 1.4 ryo #ifdef ARM_INTR_IMPL
57 1.4 ryo #include ARM_INTR_IMPL
58 1.4 ryo #else
59 1.4 ryo #error ARM_INTR_IMPL not defined
60 1.4 ryo #endif
61 1.4 ryo
62 1.4 ryo #ifndef ARM_IRQ_HANDLER
63 1.4 ryo #error ARM_IRQ_HANDLER not defined
64 1.4 ryo #endif
65 1.4 ryo
66 1.39 skrll #include <arm/cpufunc.h>
67 1.39 skrll
68 1.4 ryo #include <aarch64/userret.h>
69 1.4 ryo #include <aarch64/frame.h>
70 1.4 ryo #include <aarch64/machdep.h>
71 1.4 ryo #include <aarch64/armreg.h>
72 1.1 matt #include <aarch64/locore.h>
73 1.1 matt
74 1.41 skrll #include <arm/cpufunc.h>
75 1.41 skrll
76 1.4 ryo #ifdef KDB
77 1.4 ryo #include <machine/db_machdep.h>
78 1.4 ryo #endif
79 1.4 ryo #ifdef DDB
80 1.4 ryo #include <ddb/db_output.h>
81 1.4 ryo #include <machine/db_machdep.h>
82 1.4 ryo #endif
83 1.22 jmcneill #ifdef KDTRACE_HOOKS
84 1.22 jmcneill #include <sys/dtrace_bsd.h>
85 1.22 jmcneill #endif
86 1.4 ryo
#ifdef DDB
/*
 * When non-zero, print diagnostic details (pid, esr, pc, disassembly)
 * for user-mode illegal-instruction traps.  Tweakable from ddb.
 */
int sigill_debug = 0;
#endif

#ifdef KDTRACE_HOOKS
/* dtrace hook points; filled in by the dtrace module when it loads */
dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
dtrace_trap_func_t dtrace_trap_func = NULL;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif
96 1.22 jmcneill
/* Outcome of an attempt to emulate a trapped user instruction. */
enum emul_arm_result {
	EMUL_ARM_SUCCESS = 0,	/* emulated; emulator advanced tf_pc */
	EMUL_ARM_UNKNOWN,	/* not an instruction we emulate */
	EMUL_ARM_FAULT,		/* user memory access faulted; tf_far set */
};
102 1.28 ryo
/*
 * Human-readable names for ESR_ELx exception classes, indexed by the
 * ESR_EC field value.  Gaps (unlisted classes) are NULL and handled by
 * eclass_trapname().
 */
const char * const trap_names[] = {
	[ESR_EC_UNKNOWN]	= "Unknown Reason (Illegal Instruction)",
	[ESR_EC_SERROR]		= "SError Interrupt",
	[ESR_EC_WFX]		= "WFI or WFE instruction execution",
	[ESR_EC_ILL_STATE]	= "Illegal Execution State",

	[ESR_EC_BTE_A64]	= "Branch Target Exception",

	[ESR_EC_SYS_REG]	= "MSR/MRS/SYS instruction",
	[ESR_EC_SVC_A64]	= "SVC Instruction Execution",
	[ESR_EC_HVC_A64]	= "HVC Instruction Execution",
	[ESR_EC_SMC_A64]	= "SMC Instruction Execution",

	[ESR_EC_INSN_ABT_EL0]	= "Instruction Abort (EL0)",
	[ESR_EC_INSN_ABT_EL1]	= "Instruction Abort (EL1)",
	[ESR_EC_DATA_ABT_EL0]	= "Data Abort (EL0)",
	[ESR_EC_DATA_ABT_EL1]	= "Data Abort (EL1)",

	[ESR_EC_PC_ALIGNMENT]	= "Misaligned PC",
	[ESR_EC_SP_ALIGNMENT]	= "Misaligned SP",

	[ESR_EC_FP_ACCESS]	= "Access to SIMD/FP Registers",
	[ESR_EC_FP_TRAP_A64]	= "FP Exception",

	[ESR_EC_BRKPNT_EL0]	= "Breakpoint Exception (EL0)",
	[ESR_EC_BRKPNT_EL1]	= "Breakpoint Exception (EL1)",
	[ESR_EC_SW_STEP_EL0]	= "Software Step (EL0)",
	[ESR_EC_SW_STEP_EL1]	= "Software Step (EL1)",
	[ESR_EC_WTCHPNT_EL0]	= "Watchpoint (EL0)",
	[ESR_EC_WTCHPNT_EL1]	= "Watchpoint (EL1)",
	[ESR_EC_BKPT_INSN_A64]	= "BKPT Instruction Execution",

	/* AArch32 (COMPAT_NETBSD32) exception classes */
	[ESR_EC_CP15_RT]	= "A32: MCR/MRC access to CP15",
	[ESR_EC_CP15_RRT]	= "A32: MCRR/MRRC access to CP15",
	[ESR_EC_CP14_RT]	= "A32: MCR/MRC access to CP14",
	[ESR_EC_CP14_DT]	= "A32: LDC/STC access to CP14",
	[ESR_EC_CP14_RRT]	= "A32: MRRC access to CP14",
	[ESR_EC_SVC_A32]	= "A32: SVC Instruction Execution",
	[ESR_EC_HVC_A32]	= "A32: HVC Instruction Execution",
	[ESR_EC_SMC_A32]	= "A32: SMC Instruction Execution",
	[ESR_EC_FPID]		= "A32: MCR/MRC access to CP10",
	[ESR_EC_FP_TRAP_A32]	= "A32: FP Exception",
	[ESR_EC_BKPT_INSN_A32]	= "A32: BKPT Instruction Execution",
	[ESR_EC_VECTOR_CATCH]	= "A32: Vector Catch Exception"
};
148 1.4 ryo
149 1.6 christos const char *
150 1.4 ryo eclass_trapname(uint32_t eclass)
151 1.3 nisimura {
152 1.4 ryo static char trapnamebuf[sizeof("Unknown trap 0x????????")];
153 1.4 ryo
154 1.4 ryo if (eclass >= __arraycount(trap_names) || trap_names[eclass] == NULL) {
155 1.4 ryo snprintf(trapnamebuf, sizeof(trapnamebuf),
156 1.6 christos "Unknown trap %#02x", eclass);
157 1.4 ryo return trapnamebuf;
158 1.4 ryo }
159 1.4 ryo return trap_names[eclass];
160 1.3 nisimura }
161 1.3 nisimura
/*
 * Common code run on every return to user mode; delegates to the MI
 * mi_userret() (AST/signal/preemption bookkeeping).
 */
void
userret(struct lwp *l)
{
	mi_userret(l);
}
167 1.2 nisimura
/*
 * Handle a pending AST on the way back out to user mode: count the trap,
 * post any deferred profiling tick, then run userret().
 */
void
trap_doast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * allow to have a chance of context switch just prior to user
	 * exception return.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	/* preemption (if any) is disabled here, so ci stays our CPU */
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	/* deferred profiling charge (set by statclock while in kernel) */
	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	userret(l);
}
196 1.4 ryo
/*
 * Synchronous exception taken from EL1 (kernel mode) on the kernel
 * ("h") stack: kernel page faults, kernel debugger traps, kernel-lwp
 * FP access.  Anything else is fatal.
 */
void
trap_el1h_sync(struct trapframe *tf)
{
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/*
	 * re-enable traps and interrupts -- but only re-enable IRQs if
	 * they were not masked at the time of the exception.
	 */
	if (!(tf->tf_spsr & SPSR_I))
		daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);
	else
		daif_enable(DAIF_D|DAIF_A);

#ifdef KDTRACE_HOOKS
	/* give dtrace first crack; non-zero return means it consumed it */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, eclass))
		return;
#endif

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL1:
	case ESR_EC_DATA_ABT_EL1:
		/* kernel page fault (e.g. copyin/copyout, pageable data) */
		data_abort_handler(tf, eclass);
		break;

	case ESR_EC_BKPT_INSN_A64:
#ifdef KDTRACE_HOOKS
		/* ISS 0x40d is the dtrace FBT probe breakpoint immediate */
		if (__SHIFTOUT(esr, ESR_ISS) == 0x40d &&
		    dtrace_invop_jump_addr != 0) {
			(*dtrace_invop_jump_addr)(tf);
			break;
		}
		/* FALLTHROUGH */
#endif
	case ESR_EC_BRKPNT_EL1:
	case ESR_EC_SW_STEP_EL1:
	case ESR_EC_WTCHPNT_EL1:
#ifdef DDB
		if (eclass == ESR_EC_BRKPNT_EL1)
			kdb_trap(DB_TRAP_BREAKPOINT, tf);
		else if (eclass == ESR_EC_BKPT_INSN_A64)
			kdb_trap(DB_TRAP_BKPT_INSN, tf);
		else if (eclass == ESR_EC_WTCHPNT_EL1)
			kdb_trap(DB_TRAP_WATCHPOINT, tf);
		else if (eclass == ESR_EC_SW_STEP_EL1)
			kdb_trap(DB_TRAP_SW_STEP, tf);
		else
			kdb_trap(DB_TRAP_UNKNOWN, tf);
#else
		panic("No debugger in kernel");
#endif
		break;

	case ESR_EC_FP_ACCESS:
		/* kernel lwps flagged LW_SYSTEM_FPU may use the FPU */
		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
			fpu_load(curlwp);
			break;
		}
		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
	case ESR_EC_PC_ALIGNMENT:
	case ESR_EC_SP_ALIGNMENT:
	case ESR_EC_ILL_STATE:
	case ESR_EC_BTE_A64:
	default:
		/* no recovery possible for other EL1 exceptions */
		panic("Trap: fatal %s: pc=%016" PRIx64 " sp=%016" PRIx64
		    " esr=%08x", eclass_trapname(eclass), tf->tf_pc, tf->tf_sp,
		    esr);
		break;
	}
}
267 1.3 nisimura
/*
 * There are some systems with different cache line sizes for each cpu.
 * Userland programs can be preempted between CPUs at any time, so in such
 * a system, the minimum cache line size must be visible to userland.
 */
/* the CTR_EL0 fields we virtualize for EL0 readers */
#define CTR_EL0_USR_MASK \
	(CTR_EL0_DIC | CTR_EL0_IDC | CTR_EL0_DMIN_LINE | CTR_EL0_IMIN_LINE)
/* system-wide minimum CTR_EL0 view, computed by configure_cpu_traps() */
uint64_t ctr_el0_usr __read_mostly;
276 1.28 ryo
277 1.28 ryo static xcfunc_t
278 1.28 ryo configure_cpu_traps0(void *arg1, void *arg2)
279 1.28 ryo {
280 1.28 ryo struct cpu_info * const ci = curcpu();
281 1.28 ryo uint64_t sctlr;
282 1.28 ryo uint64_t ctr_el0_raw = reg_ctr_el0_read();
283 1.28 ryo
284 1.28 ryo #ifdef DEBUG_FORCE_TRAP_CTR_EL0
285 1.28 ryo goto need_ctr_trap;
286 1.28 ryo #endif
287 1.28 ryo
288 1.28 ryo if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DMIN_LINE) >
289 1.28 ryo __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) ||
290 1.28 ryo (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IMIN_LINE) >
291 1.28 ryo __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE)))
292 1.28 ryo goto need_ctr_trap;
293 1.28 ryo
294 1.28 ryo if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DIC) == 1 &&
295 1.28 ryo __SHIFTOUT(ctr_el0_usr, CTR_EL0_DIC) == 0) ||
296 1.28 ryo (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 1 &&
297 1.28 ryo __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 0))
298 1.28 ryo goto need_ctr_trap;
299 1.28 ryo
300 1.28 ryo #if 0 /* XXX: To do or not to do */
301 1.28 ryo /*
302 1.28 ryo * IDC==0, but (LoC==0 || LoUIS==LoUU==0)?
303 1.28 ryo * Would it be better to show IDC=1 to userland?
304 1.28 ryo */
305 1.28 ryo if (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 0 &&
306 1.28 ryo __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 1)
307 1.28 ryo goto need_ctr_trap;
308 1.28 ryo #endif
309 1.28 ryo
310 1.28 ryo return 0;
311 1.28 ryo
312 1.28 ryo need_ctr_trap:
313 1.28 ryo evcnt_attach_dynamic(&ci->ci_uct_trap, EVCNT_TYPE_MISC, NULL,
314 1.28 ryo ci->ci_cpuname, "ctr_el0 trap");
315 1.28 ryo
316 1.28 ryo /* trap CTR_EL0 access from EL0 on this cpu */
317 1.28 ryo sctlr = reg_sctlr_el1_read();
318 1.28 ryo sctlr &= ~SCTLR_UCT;
319 1.28 ryo reg_sctlr_el1_write(sctlr);
320 1.28 ryo
321 1.28 ryo return 0;
322 1.28 ryo }
323 1.28 ryo
/*
 * Compute the system-wide minimum CTR_EL0 view (ctr_el0_usr) across all
 * CPUs, then cross-call every CPU so each one can decide whether it must
 * trap EL0 CTR_EL0 reads (see configure_cpu_traps0).  Called once all
 * CPUs are up and their ID registers have been collected.
 */
void
configure_cpu_traps(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t where;

	/* remember minimum cache line size out of all CPUs */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uint64_t ctr_el0_cpu = ci->ci_id.ac_ctr;
		uint64_t clidr = ci->ci_id.ac_clidr;

		if (__SHIFTOUT(clidr, CLIDR_LOC) == 0 ||
		    (__SHIFTOUT(clidr, CLIDR_LOUIS) == 0 &&
		    __SHIFTOUT(clidr, CLIDR_LOUU) == 0)) {
			/* this means the same as IDC=1 */
			ctr_el0_cpu |= CTR_EL0_IDC;
		}

		/*
		 * if DIC==1, there is no need to icache sync. however,
		 * to calculate the minimum cacheline, in this case
		 * ICacheLine is treated as the maximum.
		 */
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu |= CTR_EL0_IMIN_LINE;

		/* Neoverse N1 erratum 1542419 */
		if (CPU_ID_NEOVERSEN1_P(ci->ci_id.ac_midr) &&
		    __SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu &= ~CTR_EL0_DIC;

		/* first CPU seeds the accumulated view */
		if (cii == 0) {
			ctr_el0_usr = ctr_el0_cpu;
			continue;
		}

		/* keep minimum cache line size, and worst DIC/IDC */
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_DIC) | ~CTR_EL0_DIC;
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_IDC) | ~CTR_EL0_IDC;
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) {
			ctr_el0_usr &= ~CTR_EL0_DMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_DMIN_LINE;
		}
		/* IMIN_LINE is only meaningful on CPUs without DIC */
		if ((ctr_el0_cpu & CTR_EL0_DIC) == 0 &&
		    (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_IMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE))) {
			ctr_el0_usr &= ~CTR_EL0_IMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_IMIN_LINE;
		}
	}

	where = xc_broadcast(0,
	    (xcfunc_t)configure_cpu_traps0, NULL, NULL);
	xc_wait(where);
}
381 1.28 ryo
382 1.28 ryo static enum emul_arm_result
383 1.28 ryo emul_aarch64_insn(struct trapframe *tf)
384 1.28 ryo {
385 1.28 ryo uint32_t insn;
386 1.28 ryo
387 1.32 ryo if (ufetch_32((uint32_t *)tf->tf_pc, &insn)) {
388 1.32 ryo tf->tf_far = reg_far_el1_read();
389 1.28 ryo return EMUL_ARM_FAULT;
390 1.32 ryo }
391 1.28 ryo
392 1.37 ryo LE32TOH(insn);
393 1.28 ryo if ((insn & 0xffffffe0) == 0xd53b0020) {
394 1.28 ryo /* mrs x?,ctr_el0 */
395 1.28 ryo unsigned int Xt = insn & 31;
396 1.28 ryo if (Xt != 31) { /* !xzr */
397 1.28 ryo uint64_t ctr_el0 = reg_ctr_el0_read();
398 1.28 ryo ctr_el0 &= ~CTR_EL0_USR_MASK;
399 1.28 ryo ctr_el0 |= (ctr_el0_usr & CTR_EL0_USR_MASK);
400 1.28 ryo tf->tf_reg[Xt] = ctr_el0;
401 1.28 ryo }
402 1.28 ryo curcpu()->ci_uct_trap.ev_count++;
403 1.28 ryo
404 1.28 ryo } else {
405 1.28 ryo return EMUL_ARM_UNKNOWN;
406 1.28 ryo }
407 1.28 ryo
408 1.28 ryo tf->tf_pc += 4;
409 1.28 ryo return EMUL_ARM_SUCCESS;
410 1.28 ryo }
411 1.28 ryo
/*
 * Synchronous exception taken from a 64-bit EL0 (user) process: page
 * faults, system calls, FP access, alignment faults, debug traps, and
 * emulated/illegal instructions.  All paths that return to user mode
 * must go through userret() (syscall handlers do so themselves).
 */
void
trap_el0_sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		/* user page fault */
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A64:
		/* system call; md_syscall handles userret itself */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;
	case ESR_EC_FP_ACCESS:
		/* lazy FP context load on first FP use */
		fpu_load(l);
		userret(l);
		break;
	case ESR_EC_FP_TRAP_A64:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_sp, esr);
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A64:
	case ESR_EC_BRKPNT_EL0:
	case ESR_EC_WTCHPNT_EL0:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SW_STEP_EL0:
		/* disable trace, and send trace trap */
		tf->tf_spsr &= ~SPSR_SS;
		do_trapsignal(l, SIGTRAP, TRAP_TRACE, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SYS_REG:
		/* trapped system-register access (e.g. CTR_EL0, see above) */
		switch (emul_aarch64_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	default:
	case ESR_EC_UNKNOWN:
 unknown:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
505 1.3 nisimura
/*
 * IRQ entry point (from either EL0 or EL1): run the platform's hard
 * interrupt handler, then any pending soft interrupts, with kernel
 * preemption held off throughout.
 */
void
cpu_irq(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif

	/*
	 * Prevent preemption once we enable traps, until we have
	 * finished running hard and soft interrupt handlers.  This
	 * guarantees ci = curcpu() remains stable and we don't
	 * accidentally try to run its pending soft interrupts on
	 * another CPU.
	 */
	kpreempt_disable();

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	/* run hard interrupt handlers */
	ci->ci_intr_depth++;
	ARM_IRQ_HANDLER(tf);
	ci->ci_intr_depth--;

	/* run soft interrupt handlers */
	cpu_dosoftints();

	/* all done, preempt as you please */
	kpreempt_enable();
}
552 1.2 nisimura
553 1.47 jmcneill void
554 1.47 jmcneill cpu_fiq(struct trapframe *tf)
555 1.47 jmcneill {
556 1.47 jmcneill struct cpu_info * const ci = curcpu();
557 1.47 jmcneill
558 1.47 jmcneill #ifdef STACKCHECKS
559 1.47 jmcneill struct lwp *l = curlwp;
560 1.47 jmcneill void *sp = (void *)reg_sp_read();
561 1.47 jmcneill if (l->l_addr >= sp) {
562 1.47 jmcneill panic("lwp/interrupt stack overflow detected."
563 1.47 jmcneill " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
564 1.47 jmcneill }
565 1.47 jmcneill #endif
566 1.47 jmcneill
567 1.47 jmcneill /* disable trace */
568 1.47 jmcneill reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
569 1.47 jmcneill
570 1.48 riastrad /*
571 1.48 riastrad * Prevent preemption once we enable traps, until we have
572 1.48 riastrad * finished running hard and soft interrupt handlers. This
573 1.48 riastrad * guarantees ci = curcpu() remains stable and we don't
574 1.48 riastrad * accidentally try to run its pending soft interrupts on
575 1.48 riastrad * another CPU.
576 1.48 riastrad */
577 1.48 riastrad kpreempt_disable();
578 1.48 riastrad
579 1.47 jmcneill /* enable traps */
580 1.47 jmcneill daif_enable(DAIF_D|DAIF_A);
581 1.47 jmcneill
582 1.48 riastrad /* run hard interrupt handlers */
583 1.47 jmcneill ci->ci_intr_depth++;
584 1.47 jmcneill ARM_FIQ_HANDLER(tf);
585 1.47 jmcneill ci->ci_intr_depth--;
586 1.47 jmcneill
587 1.48 riastrad /* run soft interrupt handlers */
588 1.47 jmcneill cpu_dosoftints();
589 1.48 riastrad
590 1.48 riastrad /* all done, preempt as you please */
591 1.48 riastrad kpreempt_enable();
592 1.47 jmcneill }
593 1.47 jmcneill
#ifdef COMPAT_NETBSD32

/*
 * 32-bit length Thumb instruction. See ARMv7 DDI0406A A6.3.
 * True when the first halfword's top bits are 0b111xx with xx != 00,
 * i.e. the halfword introduces a two-halfword (T32) encoding.
 */
#define THUMB_32BIT(hi) (((hi) & 0xe000) == 0xe000 && ((hi) & 0x1800))
600 1.21 rin
601 1.31 ryo int
602 1.31 ryo fetch_arm_insn(uint64_t pc, uint64_t spsr, uint32_t *insn)
603 1.21 rin {
604 1.21 rin
605 1.38 rin /*
606 1.38 rin * Instructions are stored in little endian for BE8,
607 1.38 rin * only a valid binary format for ILP32EB. Therefore,
608 1.38 rin * we need byte-swapping before decoding on aarch64eb.
609 1.38 rin */
610 1.38 rin
611 1.21 rin /* THUMB? */
612 1.31 ryo if (spsr & SPSR_A32_T) {
613 1.31 ryo uint16_t *p = (uint16_t *)(pc & ~1UL); /* XXX */
614 1.21 rin uint16_t hi, lo;
615 1.21 rin
616 1.31 ryo if (ufetch_16(p, &hi))
617 1.26 rin return -1;
618 1.38 rin LE16TOH(hi);
619 1.26 rin
620 1.21 rin if (!THUMB_32BIT(hi)) {
621 1.21 rin /* 16-bit Thumb instruction */
622 1.21 rin *insn = hi;
623 1.21 rin return 2;
624 1.21 rin }
625 1.21 rin
626 1.26 rin /* 32-bit Thumb instruction */
627 1.31 ryo if (ufetch_16(p + 1, &lo))
628 1.26 rin return -1;
629 1.38 rin LE16TOH(lo);
630 1.21 rin
631 1.21 rin *insn = ((uint32_t)hi << 16) | lo;
632 1.21 rin return 4;
633 1.21 rin }
634 1.21 rin
635 1.31 ryo if (ufetch_32((uint32_t *)pc, insn))
636 1.26 rin return -1;
637 1.38 rin LE32TOH(*insn);
638 1.26 rin
639 1.21 rin return 4;
640 1.21 rin }
641 1.21 rin
642 1.32 ryo static bool
643 1.32 ryo arm_cond_match(uint32_t insn, uint64_t spsr)
644 1.32 ryo {
645 1.32 ryo bool invert = (insn >> 28) & 1;
646 1.32 ryo bool match;
647 1.32 ryo
648 1.32 ryo switch (insn >> 29) {
649 1.32 ryo case 0: /* EQ or NE */
650 1.32 ryo match = spsr & SPSR_Z;
651 1.32 ryo break;
652 1.32 ryo case 1: /* CS/HI or CC/LO */
653 1.32 ryo match = spsr & SPSR_C;
654 1.32 ryo break;
655 1.32 ryo case 2: /* MI or PL */
656 1.32 ryo match = spsr & SPSR_N;
657 1.32 ryo break;
658 1.32 ryo case 3: /* VS or VC */
659 1.32 ryo match = spsr & SPSR_V;
660 1.32 ryo break;
661 1.32 ryo case 4: /* HI or LS */
662 1.32 ryo match = ((spsr & (SPSR_C | SPSR_Z)) == SPSR_C);
663 1.32 ryo break;
664 1.32 ryo case 5: /* GE or LT */
665 1.32 ryo match = (!(spsr & SPSR_N) == !(spsr & SPSR_V));
666 1.32 ryo break;
667 1.32 ryo case 6: /* GT or LE */
668 1.32 ryo match = !(spsr & SPSR_Z) &&
669 1.32 ryo (!(spsr & SPSR_N) == !(spsr & SPSR_V));
670 1.32 ryo break;
671 1.32 ryo case 7: /* AL */
672 1.32 ryo match = true;
673 1.32 ryo break;
674 1.32 ryo }
675 1.32 ryo return (!match != !invert);
676 1.32 ryo }
677 1.32 ryo
/* no MI prototype for the byte variant; declare it here */
uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);

/*
 * Emulate the (ARMv7-deprecated) A32 SWP/SWPB instructions for
 * COMPAT_NETBSD32 processes: atomically swap tf_reg[Rm] with the user
 * word/byte at tf_reg[Rn], placing the old memory value in tf_reg[Rd].
 * Returns 0 on success or EFAULT (tf_far set) on a bad user address.
 */
static int
emul_arm_swp(uint32_t insn, struct trapframe *tf)
{
	struct faultbuf fb;
	vaddr_t vaddr;
	uint32_t val;
	int Rn, Rd, Rm, error;

	Rn = __SHIFTOUT(insn, 0x000f0000);
	Rd = __SHIFTOUT(insn, 0x0000f000);
	Rm = __SHIFTOUT(insn, 0x0000000f);

	vaddr = tf->tf_reg[Rn] & 0xffffffff;
	val = tf->tf_reg[Rm];

	/* fault if insn is swp, and unaligned access */
	if ((insn & 0x00400000) == 0 && (vaddr & 3) != 0) {
		tf->tf_far = vaddr;
		return EFAULT;
	}

	/* vaddr will always point to userspace, since it has only 32bit */
	if ((error = cpu_set_onfault(&fb)) == 0) {
		/*
		 * The swap touches user memory directly from kernel
		 * mode, so PAN must be off for the access; a fault in
		 * between longjmps back here via the onfault handler
		 * with error != 0.
		 */
		if (aarch64_pan_enabled)
			reg_pan_write(0); /* disable PAN */
		if (insn & 0x00400000) {
			/* swpb */
			val = atomic_swap_8((uint8_t *)vaddr, val);
		} else {
			/* swp */
			val = atomic_swap_32((uint32_t *)vaddr, val);
		}
		cpu_unset_onfault();
		tf->tf_reg[Rd] = val;
	} else {
		tf->tf_far = reg_far_el1_read();
	}
	/* re-enable PAN on both the success and the fault path */
	if (aarch64_pan_enabled)
		reg_pan_write(1); /* enable PAN */
	return error;
}
721 1.33 ryo
722 1.32 ryo static enum emul_arm_result
723 1.32 ryo emul_thumb_insn(struct trapframe *tf, uint32_t insn, int insn_size)
724 1.32 ryo {
725 1.32 ryo /* T32-16bit or 32bit instructions */
726 1.32 ryo switch (insn_size) {
727 1.32 ryo case 2:
728 1.32 ryo /* Breakpoint used by GDB */
729 1.32 ryo if (insn == 0xdefe) {
730 1.32 ryo do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
731 1.32 ryo (void *)tf->tf_pc, 0);
732 1.32 ryo return EMUL_ARM_SUCCESS;
733 1.32 ryo }
734 1.32 ryo /* XXX: some T32 IT instruction deprecated should be emulated */
735 1.32 ryo break;
736 1.32 ryo case 4:
737 1.32 ryo break;
738 1.32 ryo default:
739 1.32 ryo return EMUL_ARM_FAULT;
740 1.32 ryo }
741 1.32 ryo return EMUL_ARM_UNKNOWN;
742 1.32 ryo }
743 1.32 ryo
/*
 * Emulate a trapped AArch32 instruction for a COMPAT_NETBSD32 process:
 * GDB breakpoints, SWP/SWPB, and the ARMv6 user-visible CP15 c7 cache
 * maintenance/barrier MCRs.  On EMUL_ARM_FAULT the faulting address has
 * been stored in tf->tf_far.
 */
static enum emul_arm_result
emul_arm_insn(struct trapframe *tf)
{
	uint32_t insn;
	int insn_size;

	insn_size = fetch_arm_insn(tf->tf_pc, tf->tf_spsr, &insn);
	/* capture FAR now in case the fetch itself faulted */
	tf->tf_far = reg_far_el1_read();

	if (tf->tf_spsr & SPSR_A32_T)
		return emul_thumb_insn(tf, insn, insn_size);
	/* A32 instructions are always 4 bytes; anything else was a fault */
	if (insn_size != 4)
		return EMUL_ARM_FAULT;

	/* Breakpoint used by GDB */
	if (insn == 0xe6000011 || insn == 0xe7ffdefe) {
		do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
		    (void *)tf->tf_pc, 0);
		return EMUL_ARM_SUCCESS;
	}

	/* Unconditional instruction extension space? */
	if ((insn & 0xf0000000) == 0xf0000000)
		goto unknown_insn;

	/* swp,swpb */
	if ((insn & 0x0fb00ff0) == 0x01000090) {
		/* condition-failed SWP still counts as emulated (a nop) */
		if (arm_cond_match(insn, tf->tf_spsr)) {
			if (emul_arm_swp(insn, tf) != 0)
				return EMUL_ARM_FAULT;
		}
		goto emulated;
	}

	/*
	 * Emulate ARMv6 instructions with cache operations
	 * register (c7), that can be used in user mode.
	 */
	switch (insn & 0x0fff0fff) {
	case 0x0e070f95:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c5, 4
			 * (flush prefetch buffer)
			 */
			isb();
		}
		goto emulated;
	case 0x0e070f9a:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 4
			 * (data synchronization barrier)
			 */
			dsb(sy);
		}
		goto emulated;
	case 0x0e070fba:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 5
			 * (data memory barrier)
			 */
			dmb(sy);
		}
		goto emulated;
	default:
		break;
	}

 unknown_insn:
	/* unknown, or unsupported instruction */
	return EMUL_ARM_UNKNOWN;

 emulated:
	tf->tf_pc += insn_size;
	return EMUL_ARM_SUCCESS;
}
822 1.21 rin #endif /* COMPAT_NETBSD32 */
823 1.21 rin
/*
 * Synchronous exception taken from a 32-bit (AArch32) EL0 process.
 * Only meaningful with COMPAT_NETBSD32; without it every class falls
 * through to SIGILL.  Structure parallels trap_el0_sync() above.
 */
void
trap_el0_32sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
#ifdef COMPAT_NETBSD32
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		/* user page fault */
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A32:
		/* netbsd32 system call; handler does its own userret */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;

	case ESR_EC_FP_ACCESS:
		fpu_load(l);
		userret(l);
		break;

	case ESR_EC_FP_TRAP_A32:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN,
		    (void *)tf->tf_reg[13], esr); /* sp is r13 on AArch32 */
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A32:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_UNKNOWN:
		/* try emulating deprecated/32-bit-only instructions */
		switch (emul_arm_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	case ESR_EC_CP15_RT:
	case ESR_EC_CP15_RRT:
	case ESR_EC_CP14_RT:
	case ESR_EC_CP14_DT:
	case ESR_EC_CP14_RRT:
 unknown:
#endif /* COMPAT_NETBSD32 */
	default:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
919 1.4 ryo
920 1.46 ryo void
921 1.46 ryo trap_el1h_error(struct trapframe *tf)
922 1.46 ryo {
923 1.46 ryo /*
924 1.46 ryo * Normally, we should panic unconditionally,
925 1.46 ryo * but SError interrupt may occur when accessing to unmapped(?) I/O
926 1.46 ryo * spaces. bus_space_{peek,poke}_{1,2,4,8}() should trap these case.
927 1.46 ryo */
928 1.46 ryo struct faultbuf *fb;
929 1.46 ryo
930 1.46 ryo if (curcpu()->ci_intr_depth == 0) {
931 1.46 ryo fb = cpu_disable_onfault();
932 1.46 ryo if (fb != NULL) {
933 1.46 ryo cpu_jump_onfault(tf, fb, EFAULT);
934 1.46 ryo return;
935 1.46 ryo }
936 1.46 ryo }
937 1.46 ryo panic("%s", __func__);
938 1.46 ryo }
939 1.46 ryo
/*
 * bad_trap_panic --
 *	Generate a trap handler that unconditionally panics, for
 *	exception vectors that should never be taken (EL1t vectors,
 *	FIQ, and SError from user mode).
 */
#define bad_trap_panic(trapfunc)	\
void					\
trapfunc(struct trapframe *tf)		\
{					\
	panic("%s", __func__);		\
}
bad_trap_panic(trap_el1t_sync)
bad_trap_panic(trap_el1t_irq)
bad_trap_panic(trap_el1t_fiq)
bad_trap_panic(trap_el1t_error)
bad_trap_panic(trap_el1h_fiq)
bad_trap_panic(trap_el0_fiq)
bad_trap_panic(trap_el0_error)
bad_trap_panic(trap_el0_32fiq)
bad_trap_panic(trap_el0_32error)
955 1.2 nisimura
956 1.4 ryo void
957 1.4 ryo cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb, int val)
958 1.4 ryo {
959 1.2 nisimura tf->tf_reg[19] = fb->fb_reg[FB_X19];
960 1.2 nisimura tf->tf_reg[20] = fb->fb_reg[FB_X20];
961 1.2 nisimura tf->tf_reg[21] = fb->fb_reg[FB_X21];
962 1.2 nisimura tf->tf_reg[22] = fb->fb_reg[FB_X22];
963 1.2 nisimura tf->tf_reg[23] = fb->fb_reg[FB_X23];
964 1.2 nisimura tf->tf_reg[24] = fb->fb_reg[FB_X24];
965 1.2 nisimura tf->tf_reg[25] = fb->fb_reg[FB_X25];
966 1.2 nisimura tf->tf_reg[26] = fb->fb_reg[FB_X26];
967 1.2 nisimura tf->tf_reg[27] = fb->fb_reg[FB_X27];
968 1.2 nisimura tf->tf_reg[28] = fb->fb_reg[FB_X28];
969 1.2 nisimura tf->tf_reg[29] = fb->fb_reg[FB_X29];
970 1.2 nisimura tf->tf_sp = fb->fb_reg[FB_SP];
971 1.4 ryo tf->tf_pc = fb->fb_reg[FB_LR];
972 1.4 ryo tf->tf_reg[0] = val;
973 1.2 nisimura }
974 1.2 nisimura
975 1.6 christos #ifdef TRAP_SIGDEBUG
/*
 * frame_dump --
 *	Print every register saved in the trapframe, then a hexdump of
 *	the 256 bytes starting at the trapframe itself (labelled as a
 *	stack dump -- the trapframe sits on the kernel stack).
 *	TRAP_SIGDEBUG only.
 */
static void
frame_dump(const struct trapframe *tf)
{
	const struct reg *r = &tf->tf_regs;

	printf("trapframe %p\n", tf);
	for (size_t i = 0; i < __arraycount(r->r_reg); i++) {
		/*
		 * Two registers per output line: the trailing character
		 * indexes the string " \n" -- '\n' when i is even and
		 * non-zero, ' ' otherwise.
		 */
		printf(" r%.2zu %#018" PRIx64 "%c", i, r->r_reg[i],
		    " \n"[i && (i & 1) == 0]);
	}

	printf("\n");
	printf(" sp %#018" PRIx64 " pc %#018" PRIx64 "\n",
	    r->r_sp, r->r_pc);
	printf(" spsr %#018" PRIx64 " tpidr %#018" PRIx64 "\n",
	    r->r_spsr, r->r_tpidr);
	printf(" esr %#018" PRIx64 " far %#018" PRIx64 "\n",
	    tf->tf_esr, tf->tf_far);

	printf("\n");
	hexdump(printf, "Stack dump", tf, 256);
}
998 1.6 christos
/*
 * sigdebug --
 *	Log a one-line summary of a trap-induced signal about to be
 *	delivered to the current lwp (pid/lid, signal number, raw ESR
 *	as the trap code, pc, fault address, exception class name),
 *	followed by a full trapframe dump.  TRAP_SIGDEBUG only.
 */
static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	/* ksi_trap carries the ESR value; extract the exception class */
	const uint32_t eclass = __SHIFTOUT(ksi->ksi_trap, ESR_EC);

	printf("pid %d.%d (%s): signal %d (trap %#x) "
	    "@pc %#" PRIx64 ", addr %p, error=%s\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_trap,
	    tf->tf_regs.r_pc, ksi->ksi_addr, eclass_trapname(eclass));
	frame_dump(tf);
}
1012 1.6 christos #endif
1013 1.6 christos
/*
 * do_trapsignal1 --
 *	Deliver a trap-induced signal to lwp l: build the ksiginfo from
 *	the signal number, si_code, fault address and trap (ESR) value,
 *	then hand it to the process's emulation-specific trapsignal
 *	hook.  With TRAP_SIGDEBUG, the extra func/line/tf arguments
 *	(supplied by the do_trapsignal() wrapper macro) identify the
 *	calling site and allow a trapframe dump before delivery.
 */
void
do_trapsignal1(
#ifdef TRAP_SIGDEBUG
    const char *func,
    size_t line,
    struct trapframe *tf,
#endif
    struct lwp *l, int signo, int code, void *addr, int trap)
{
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trap = trap;
#ifdef TRAP_SIGDEBUG
	printf("%s, %zu: ", func, line);
	sigdebug(tf, &ksi);
#endif
	(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
}
1036 1.23 ad
1037 1.23 ad bool
1038 1.23 ad cpu_intr_p(void)
1039 1.23 ad {
1040 1.23 ad uint64_t ncsw;
1041 1.23 ad int idepth;
1042 1.23 ad lwp_t *l;
1043 1.23 ad
1044 1.23 ad #ifdef __HAVE_PIC_FAST_SOFTINTS
1045 1.23 ad /* XXX Copied from cpu.h. Looks incomplete - needs fixing. */
1046 1.23 ad if (ci->ci_cpl < IPL_VM)
1047 1.23 ad return false;
1048 1.23 ad #endif
1049 1.23 ad
1050 1.23 ad l = curlwp;
1051 1.23 ad if (__predict_false(l->l_cpu == NULL)) {
1052 1.23 ad KASSERT(l == &lwp0);
1053 1.23 ad return false;
1054 1.23 ad }
1055 1.23 ad do {
1056 1.23 ad ncsw = l->l_ncsw;
1057 1.23 ad __insn_barrier();
1058 1.24 skrll idepth = l->l_cpu->ci_intr_depth;
1059 1.23 ad __insn_barrier();
1060 1.23 ad } while (__predict_false(ncsw != l->l_ncsw));
1061 1.23 ad
1062 1.23 ad return idepth > 0;
1063 1.23 ad }
1064