/* $NetBSD: trap.c,v 1.35 2020/08/01 02:06:59 riastradh Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.35 2020/08/01 02:06:59 riastradh Exp $");

#include "opt_arm_intr_impl.h"
#include "opt_compat_netbsd32.h"
#include "opt_dtrace.h"

#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#ifdef KDB
#include <sys/kdb.h>
#endif
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/siginfo.h>
#include <sys/xcall.h>

#ifdef ARM_INTR_IMPL
#include ARM_INTR_IMPL
#else
#error ARM_INTR_IMPL not defined
#endif

#ifndef ARM_IRQ_HANDLER
#error ARM_IRQ_HANDLER not defined
#endif

#include <aarch64/userret.h>
#include <aarch64/frame.h>
#include <aarch64/machdep.h>
#include <aarch64/armreg.h>
#include <aarch64/locore.h>

#ifdef KDB
#include <machine/db_machdep.h>
#endif
#ifdef DDB
#include <ddb/db_output.h>
#include <machine/db_machdep.h>
#endif
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

#ifdef DDB
int sigill_debug = 0;
#endif

#ifdef KDTRACE_HOOKS
dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
dtrace_trap_func_t dtrace_trap_func = NULL;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

enum emul_arm_result {
	EMUL_ARM_SUCCESS = 0,
	EMUL_ARM_UNKNOWN,
	EMUL_ARM_FAULT,
};

const char * const trap_names[] = {
	[ESR_EC_UNKNOWN] = "Unknown Reason (Illegal Instruction)",
	[ESR_EC_SERROR] = "SError Interrupt",
	[ESR_EC_WFX] = "WFI or WFE instruction execution",
	[ESR_EC_ILL_STATE] = "Illegal Execution State",

	[ESR_EC_BTE_A64] = "Branch Target Exception",

	[ESR_EC_SYS_REG] = "MSR/MRS/SYS instruction",
	[ESR_EC_SVC_A64] = "SVC Instruction Execution",
	[ESR_EC_HVC_A64] = "HVC Instruction Execution",
	[ESR_EC_SMC_A64] = "SMC Instruction Execution",

	[ESR_EC_INSN_ABT_EL0] = "Instruction Abort (EL0)",
	[ESR_EC_INSN_ABT_EL1] = "Instruction Abort (EL1)",
	[ESR_EC_DATA_ABT_EL0] = "Data Abort (EL0)",
	[ESR_EC_DATA_ABT_EL1] = "Data Abort (EL1)",

	[ESR_EC_PC_ALIGNMENT] = "Misaligned PC",
	[ESR_EC_SP_ALIGNMENT] = "Misaligned SP",

	[ESR_EC_FP_ACCESS] = "Access to SIMD/FP Registers",
	[ESR_EC_FP_TRAP_A64] = "FP Exception",

	[ESR_EC_BRKPNT_EL0] = "Breakpoint Exception (EL0)",
	[ESR_EC_BRKPNT_EL1] = "Breakpoint Exception (EL1)",
	[ESR_EC_SW_STEP_EL0] = "Software Step (EL0)",
	[ESR_EC_SW_STEP_EL1] = "Software Step (EL1)",
	[ESR_EC_WTCHPNT_EL0] = "Watchpoint (EL0)",
	[ESR_EC_WTCHPNT_EL1] = "Watchpoint (EL1)",
	[ESR_EC_BKPT_INSN_A64] = "BKPT Instruction Execution",

	[ESR_EC_CP15_RT] = "A32: MCR/MRC access to CP15",
	[ESR_EC_CP15_RRT] = "A32: MCRR/MRRC access to CP15",
	[ESR_EC_CP14_RT] = "A32: MCR/MRC access to CP14",
	[ESR_EC_CP14_DT] = "A32: LDC/STC access to CP14",
	[ESR_EC_CP14_RRT] = "A32: MRRC access to CP14",
	[ESR_EC_SVC_A32] = "A32: SVC Instruction Execution",
	[ESR_EC_HVC_A32] = "A32: HVC Instruction Execution",
	[ESR_EC_SMC_A32] = "A32: SMC Instruction Execution",
	[ESR_EC_FPID] = "A32: MCR/MRC access to CP10",
	[ESR_EC_FP_TRAP_A32] = "A32: FP Exception",
	[ESR_EC_BKPT_INSN_A32] = "A32: BKPT Instruction Execution",
	[ESR_EC_VECTOR_CATCH] = "A32: Vector Catch Exception"
};

const char *
eclass_trapname(uint32_t eclass)
{
	static char trapnamebuf[sizeof("Unknown trap 0x????????")];

	if (eclass >= __arraycount(trap_names) || trap_names[eclass] == NULL) {
		snprintf(trapnamebuf, sizeof(trapnamebuf),
		    "Unknown trap %#02x", eclass);
		return trapnamebuf;
	}
	return trap_names[eclass];
}

void
userret(struct lwp *l)
{
	mi_userret(l);
}

void
trap_doast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * Allow a chance of a context switch just prior to the
	 * exception return to userland.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	userret(l);
}

void
trap_el1h_sync(struct trapframe *tf)
{
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* re-enable traps and interrupts */
	if (!(tf->tf_spsr & SPSR_I))
		daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);
	else
		daif_enable(DAIF_D|DAIF_A);

#ifdef KDTRACE_HOOKS
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, eclass))
		return;
#endif

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL1:
	case ESR_EC_DATA_ABT_EL1:
		data_abort_handler(tf, eclass);
		break;

	case ESR_EC_BKPT_INSN_A64:
#ifdef KDTRACE_HOOKS
		if (__SHIFTOUT(esr, ESR_ISS) == 0x40d &&
		    dtrace_invop_jump_addr != 0) {
			(*dtrace_invop_jump_addr)(tf);
			break;
		}
		/* FALLTHROUGH */
#endif
	case ESR_EC_BRKPNT_EL1:
	case ESR_EC_SW_STEP_EL1:
	case ESR_EC_WTCHPNT_EL1:
#ifdef DDB
		if (eclass == ESR_EC_BRKPNT_EL1)
			kdb_trap(DB_TRAP_BREAKPOINT, tf);
		else if (eclass == ESR_EC_BKPT_INSN_A64)
			kdb_trap(DB_TRAP_BKPT_INSN, tf);
		else if (eclass == ESR_EC_WTCHPNT_EL1)
			kdb_trap(DB_TRAP_WATCHPOINT, tf);
		else if (eclass == ESR_EC_SW_STEP_EL1)
			kdb_trap(DB_TRAP_SW_STEP, tf);
		else
			kdb_trap(DB_TRAP_UNKNOWN, tf);
#else
		panic("No debugger in kernel");
#endif
		break;

	case ESR_EC_FP_ACCESS:
		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
			fpu_load(curlwp);
			break;
		}
		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
	case ESR_EC_PC_ALIGNMENT:
	case ESR_EC_SP_ALIGNMENT:
	case ESR_EC_ILL_STATE:
	case ESR_EC_BTE_A64:
	default:
		panic("Trap: fatal %s: pc=%016" PRIx64 " sp=%016" PRIx64
		    " esr=%08x", eclass_trapname(eclass), tf->tf_pc, tf->tf_sp,
		    esr);
		break;
	}
}

/*
 * Some systems have CPUs with different cache line sizes.  Userland
 * programs can be migrated between CPUs at any time, so on such systems
 * only the minimum cache line size of all CPUs may be made visible
 * to userland.
 */
#define CTR_EL0_USR_MASK \
	(CTR_EL0_DIC | CTR_EL0_IDC | CTR_EL0_DMIN_LINE | CTR_EL0_IMIN_LINE)
uint64_t ctr_el0_usr __read_mostly;
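
/*
 * ctr_el0_usr is the most conservative merge of CTR_EL0 across all CPUs,
 * computed in configure_cpu_traps().  A CPU whose own CTR_EL0 advertises
 * more than this merged view clears SCTLR_EL1.UCT (configure_cpu_traps0())
 * so that EL0 reads of CTR_EL0 trap, and emul_aarch64_insn() then supplies
 * the CTR_EL0_USR_MASK fields from ctr_el0_usr instead of the raw value.
 */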

static xcfunc_t
configure_cpu_traps0(void *arg1, void *arg2)
{
	struct cpu_info * const ci = curcpu();
	uint64_t sctlr;
	uint64_t ctr_el0_raw = reg_ctr_el0_read();

#ifdef DEBUG_FORCE_TRAP_CTR_EL0
	goto need_ctr_trap;
#endif

	if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DMIN_LINE) >
	     __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) ||
	    (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IMIN_LINE) >
	     __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE)))
		goto need_ctr_trap;

	if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DIC) == 1 &&
	     __SHIFTOUT(ctr_el0_usr, CTR_EL0_DIC) == 0) ||
	    (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 1 &&
	     __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 0))
		goto need_ctr_trap;

#if 0 /* XXX: To do or not to do */
	/*
	 * IDC==0, but (LoC==0 || LoUIS==LoUU==0)?
	 * Would it be better to show IDC=1 to userland?
	 */
	if (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 0 &&
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 1)
		goto need_ctr_trap;
#endif

	return 0;

 need_ctr_trap:
	evcnt_attach_dynamic(&ci->ci_uct_trap, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "ctr_el0 trap");

	/* trap CTR_EL0 access from EL0 on this cpu */
	sctlr = reg_sctlr_el1_read();
	sctlr &= ~SCTLR_UCT;
	reg_sctlr_el1_write(sctlr);

	return 0;
}
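
/*
 * With SCTLR_EL1.UCT cleared on a CPU, an EL0 read of the cache type
 * register, e.g.
 *
 *	mrs	x0, ctr_el0
 *
 * no longer reads the raw register but takes an ESR_EC_SYS_REG exception,
 * which trap_el0_sync() hands to emul_aarch64_insn() below; the emulation
 * returns ctr_el0_usr and counts the event in ci_uct_trap.
 */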

void
configure_cpu_traps(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t where;

	/* remember minimum cache line size out of all CPUs */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uint64_t ctr_el0_cpu = ci->ci_id.ac_ctr;
		uint64_t clidr = ci->ci_id.ac_clidr;

		if (__SHIFTOUT(clidr, CLIDR_LOC) == 0 ||
		    (__SHIFTOUT(clidr, CLIDR_LOUIS) == 0 &&
		     __SHIFTOUT(clidr, CLIDR_LOUU) == 0)) {
			/* this means the same as IDC=1 */
			ctr_el0_cpu |= CTR_EL0_IDC;
		}

		/*
		 * If DIC==1 there is no need for icache sync.  However,
		 * for the purpose of calculating the minimum cache line
		 * size, IminLine is treated as the maximum in this case.
		 */
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu |= CTR_EL0_IMIN_LINE;

		/* Neoverse N1 erratum 1542419 */
		if (CPU_ID_NEOVERSEN1_P(ci->ci_id.ac_midr) &&
		    __SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu &= ~CTR_EL0_DIC;

		if (cii == 0) {
			ctr_el0_usr = ctr_el0_cpu;
			continue;
		}

		/* keep minimum cache line size, and worst DIC/IDC */
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_DIC) | ~CTR_EL0_DIC;
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_IDC) | ~CTR_EL0_IDC;
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) {
			ctr_el0_usr &= ~CTR_EL0_DMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_DMIN_LINE;
		}
		if ((ctr_el0_cpu & CTR_EL0_DIC) == 0 &&
		    (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_IMIN_LINE) <
		     __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE))) {
			ctr_el0_usr &= ~CTR_EL0_IMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_IMIN_LINE;
		}
	}
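
	/*
	 * Worked example with hypothetical values (not from real hardware):
	 * if CPU0 reports CTR_EL0.DminLine = 4 (16 words = 64 bytes) and
	 * CPU1 reports DminLine = 3 (8 words = 32 bytes), the loop above
	 * leaves DminLine = 3 in ctr_el0_usr, and likewise keeps DIC/IDC
	 * only if every CPU sets them.  CPU0, whose raw DminLine is larger
	 * than the merged value, then takes the need_ctr_trap path in
	 * configure_cpu_traps0().
	 */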

	where = xc_broadcast(0,
	    (xcfunc_t)configure_cpu_traps0, NULL, NULL);
	xc_wait(where);
}

static enum emul_arm_result
emul_aarch64_insn(struct trapframe *tf)
{
	uint32_t insn;

	if (ufetch_32((uint32_t *)tf->tf_pc, &insn)) {
		tf->tf_far = reg_far_el1_read();
		return EMUL_ARM_FAULT;
	}

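	/*
	 * MRS <Xt>, CTR_EL0 encodes as 0xd53b0020 | Xt (op0=3, op1=3,
	 * CRn=C0, CRm=C0, op2=1); masking with 0xffffffe0 ignores the
	 * destination register field in bits [4:0].
	 */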
	if ((insn & 0xffffffe0) == 0xd53b0020) {
		/* mrs x?,ctr_el0 */
		unsigned int Xt = insn & 31;
		if (Xt != 31) { /* !xzr */
			uint64_t ctr_el0 = reg_ctr_el0_read();
			ctr_el0 &= ~CTR_EL0_USR_MASK;
			ctr_el0 |= (ctr_el0_usr & CTR_EL0_USR_MASK);
			tf->tf_reg[Xt] = ctr_el0;
		}
		curcpu()->ci_uct_trap.ev_count++;

	} else {
		return EMUL_ARM_UNKNOWN;
	}

	tf->tf_pc += 4;
	return EMUL_ARM_SUCCESS;
}

void
trap_el0_sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A64:
		(*l->l_proc->p_md.md_syscall)(tf);
		break;
	case ESR_EC_FP_ACCESS:
		fpu_load(l);
		userret(l);
		break;
	case ESR_EC_FP_TRAP_A64:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_sp, esr);
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A64:
	case ESR_EC_BRKPNT_EL0:
	case ESR_EC_WTCHPNT_EL0:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SW_STEP_EL0:
		/* disable trace, and send trace trap */
		tf->tf_spsr &= ~SPSR_SS;
		do_trapsignal(l, SIGTRAP, TRAP_TRACE, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SYS_REG:
		switch (emul_aarch64_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	default:
	case ESR_EC_UNKNOWN:
 unknown:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}

void
interrupt(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	ci->ci_intr_depth++;
	ARM_IRQ_HANDLER(tf);
	ci->ci_intr_depth--;

	cpu_dosoftints();
}

#ifdef COMPAT_NETBSD32

/*
 * 32-bit Thumb instruction. See the ARMv7 ARM, DDI 0406A, A6.3.
 */
#define THUMB_32BIT(hi) (((hi) & 0xe000) == 0xe000 && ((hi) & 0x1800))
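/*
 * The first halfword of a 32-bit Thumb encoding has its top five bits
 * set to 0b11101, 0b11110 or 0b11111; 0b11100 is the 16-bit B encoding.
 * THUMB_32BIT() checks exactly that: bits [15:13] all set and bits
 * [12:11] not both zero.
 */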

int
fetch_arm_insn(uint64_t pc, uint64_t spsr, uint32_t *insn)
{

	/* THUMB? */
	if (spsr & SPSR_A32_T) {
		uint16_t *p = (uint16_t *)(pc & ~1UL); /* XXX */
		uint16_t hi, lo;

		if (ufetch_16(p, &hi))
			return -1;

		if (!THUMB_32BIT(hi)) {
			/* 16-bit Thumb instruction */
			*insn = hi;
			return 2;
		}

		/* 32-bit Thumb instruction */
		if (ufetch_16(p + 1, &lo))
			return -1;

		*insn = ((uint32_t)hi << 16) | lo;
		return 4;
	}

	if (ufetch_32((uint32_t *)pc, insn))
		return -1;

	return 4;
}

static bool
arm_cond_match(uint32_t insn, uint64_t spsr)
{
	bool invert = (insn >> 28) & 1;
	bool match;

	switch (insn >> 29) {
	case 0:	/* EQ or NE */
		match = spsr & SPSR_Z;
		break;
	case 1:	/* CS/HI or CC/LO */
		match = spsr & SPSR_C;
		break;
	case 2:	/* MI or PL */
		match = spsr & SPSR_N;
		break;
	case 3:	/* VS or VC */
		match = spsr & SPSR_V;
		break;
	case 4:	/* HI or LS */
		match = ((spsr & (SPSR_C | SPSR_Z)) == SPSR_C);
		break;
	case 5:	/* GE or LT */
		match = (!(spsr & SPSR_N) == !(spsr & SPSR_V));
		break;
	case 6:	/* GT or LE */
		match = !(spsr & SPSR_Z) &&
		    (!(spsr & SPSR_N) == !(spsr & SPSR_V));
		break;
	case 7:	/* AL */
		match = true;
		break;
	}
	return (!match != !invert);
}
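
/*
 * Worked example: for a SWPLT instruction the condition field
 * insn[31:28] is 0xb (LT).  insn >> 29 is then 5 (the GE/LT pair),
 * "match" is set for N == V (GE), and the low bit of the condition
 * field (invert) flips the result, so the instruction executes when
 * N != V, i.e. LT.
 */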

uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);

static int
emul_arm_swp(uint32_t insn, struct trapframe *tf)
{
	struct faultbuf fb;
	vaddr_t vaddr;
	uint32_t val;
	int Rn, Rd, Rm, error;

	Rn = __SHIFTOUT(insn, 0x000f0000);
	Rd = __SHIFTOUT(insn, 0x0000f000);
	Rm = __SHIFTOUT(insn, 0x0000000f);

	vaddr = tf->tf_reg[Rn] & 0xffffffff;
	val = tf->tf_reg[Rm];

	/* SWP (word form) requires an aligned address; fault if unaligned */
	if ((insn & 0x00400000) == 0 && (vaddr & 3) != 0) {
		tf->tf_far = vaddr;
		return EFAULT;
	}

	/* vaddr always points into userspace, since it is only 32 bits wide */
	if ((error = cpu_set_onfault(&fb)) == 0) {
		if (insn & 0x00400000) {
			/* swpb */
			val = atomic_swap_8((uint8_t *)vaddr, val);
		} else {
			/* swp */
			val = atomic_swap_32((uint32_t *)vaddr, val);
		}
		cpu_unset_onfault();
		tf->tf_reg[Rd] = val;
	} else {
		tf->tf_far = reg_far_el1_read();
	}
	return error;
}
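
/*
 * SWP/SWPB encoding, matched in emul_arm_insn() with
 * (insn & 0x0fb00ff0) == 0x01000090:
 *
 *	cond 0001 0B00 Rn Rd 0000 1001 Rm
 *
 * Bit 22 (0x00400000) selects the byte form (SWPB); Rn is bits [19:16],
 * Rd bits [15:12] and Rm bits [3:0], which is what the __SHIFTOUT masks
 * above extract.
 */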

static enum emul_arm_result
emul_thumb_insn(struct trapframe *tf, uint32_t insn, int insn_size)
{
	/* T32-16bit or 32bit instructions */
	switch (insn_size) {
	case 2:
		/* Breakpoint used by GDB */
		if (insn == 0xdefe) {
			do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
			    (void *)tf->tf_pc, 0);
			return EMUL_ARM_SUCCESS;
		}
		/* XXX: some deprecated T32 IT instructions should be emulated */
		break;
	case 4:
		break;
	default:
		return EMUL_ARM_FAULT;
	}
	return EMUL_ARM_UNKNOWN;
}

static enum emul_arm_result
emul_arm_insn(struct trapframe *tf)
{
	uint32_t insn;
	int insn_size;

	insn_size = fetch_arm_insn(tf->tf_pc, tf->tf_spsr, &insn);
	tf->tf_far = reg_far_el1_read();

	if (tf->tf_spsr & SPSR_A32_T)
		return emul_thumb_insn(tf, insn, insn_size);
	if (insn_size != 4)
		return EMUL_ARM_FAULT;

	/* Breakpoint used by GDB */
	if (insn == 0xe6000011 || insn == 0xe7ffdefe) {
		do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
		    (void *)tf->tf_pc, 0);
		return EMUL_ARM_SUCCESS;
	}

	/* Unconditional instruction extension space? */
	if ((insn & 0xf0000000) == 0xf0000000)
		goto unknown_insn;

	/* swp,swpb */
	if ((insn & 0x0fb00ff0) == 0x01000090) {
		if (arm_cond_match(insn, tf->tf_spsr)) {
			if (emul_arm_swp(insn, tf) != 0)
				return EMUL_ARM_FAULT;
		}
		goto emulated;
	}

	/*
	 * Emulate the ARMv6 instructions on the cache operations
	 * register (c7) that can be used from user mode.
	 */
	switch (insn & 0x0fff0fff) {
	case 0x0e070f95:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c5, 4
			 * (flush prefetch buffer)
			 */
			__asm __volatile("isb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070f9a:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 4
			 * (data synchronization barrier)
			 */
			__asm __volatile("dsb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070fba:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 5
			 * (data memory barrier)
			 */
			__asm __volatile("dmb sy" ::: "memory");
		}
		goto emulated;
	default:
		break;
	}

 unknown_insn:
	/* unknown, or unsupported instruction */
	return EMUL_ARM_UNKNOWN;

 emulated:
	tf->tf_pc += insn_size;
	return EMUL_ARM_SUCCESS;
}
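
/*
 * Illustrative (hypothetical) 32-bit userland sequences that end up in
 * emul_arm_insn() via the ESR_EC_UNKNOWN case of trap_el0_32sync():
 *
 *	swp	r0, r1, [r2]		@ handled by emul_arm_swp()
 *	mcr	p15, 0, r0, c7, c10, 5	@ emulated as "dmb sy"
 *
 * On success the emulation advances tf_pc past the instruction so the
 * program continues as if the instruction had executed natively.
 */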
#endif /* COMPAT_NETBSD32 */

void
trap_el0_32sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
#ifdef COMPAT_NETBSD32
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A32:
		(*l->l_proc->p_md.md_syscall)(tf);
		break;

	case ESR_EC_FP_ACCESS:
		fpu_load(l);
		userret(l);
		break;

	case ESR_EC_FP_TRAP_A32:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN,
		    (void *)tf->tf_reg[13], esr); /* sp is r13 on AArch32 */
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A32:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_UNKNOWN:
		switch (emul_arm_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	case ESR_EC_CP15_RT:
	case ESR_EC_CP15_RRT:
	case ESR_EC_CP14_RT:
	case ESR_EC_CP14_DT:
	case ESR_EC_CP14_RRT:
 unknown:
#endif /* COMPAT_NETBSD32 */
	default:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}

#define bad_trap_panic(trapfunc)	\
void					\
trapfunc(struct trapframe *tf)		\
{					\
	panic("%s", __func__);		\
}
bad_trap_panic(trap_el1t_sync)
bad_trap_panic(trap_el1t_irq)
bad_trap_panic(trap_el1t_fiq)
bad_trap_panic(trap_el1t_error)
bad_trap_panic(trap_el1h_fiq)
bad_trap_panic(trap_el1h_error)
bad_trap_panic(trap_el0_fiq)
bad_trap_panic(trap_el0_error)
bad_trap_panic(trap_el0_32fiq)
bad_trap_panic(trap_el0_32error)

void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb, int val)
{
	tf->tf_reg[19] = fb->fb_reg[FB_X19];
	tf->tf_reg[20] = fb->fb_reg[FB_X20];
	tf->tf_reg[21] = fb->fb_reg[FB_X21];
	tf->tf_reg[22] = fb->fb_reg[FB_X22];
	tf->tf_reg[23] = fb->fb_reg[FB_X23];
	tf->tf_reg[24] = fb->fb_reg[FB_X24];
	tf->tf_reg[25] = fb->fb_reg[FB_X25];
	tf->tf_reg[26] = fb->fb_reg[FB_X26];
	tf->tf_reg[27] = fb->fb_reg[FB_X27];
	tf->tf_reg[28] = fb->fb_reg[FB_X28];
	tf->tf_reg[29] = fb->fb_reg[FB_X29];
	tf->tf_sp = fb->fb_reg[FB_SP];
	tf->tf_pc = fb->fb_reg[FB_LR];
	tf->tf_reg[0] = val;
}

#ifdef TRAP_SIGDEBUG
static void
frame_dump(const struct trapframe *tf)
{
	const struct reg *r = &tf->tf_regs;

	printf("trapframe %p\n", tf);
	for (size_t i = 0; i < __arraycount(r->r_reg); i++) {
		printf(" r%.2zu %#018" PRIx64 "%c", i, r->r_reg[i],
		    " \n"[i && (i & 1) == 0]);
	}

	printf("\n");
	printf(" sp %#018" PRIx64 " pc %#018" PRIx64 "\n",
	    r->r_sp, r->r_pc);
	printf(" spsr %#018" PRIx64 " tpidr %#018" PRIx64 "\n",
	    r->r_spsr, r->r_tpidr);
	printf(" esr %#018" PRIx64 " far %#018" PRIx64 "\n",
	    tf->tf_esr, tf->tf_far);

	printf("\n");
	hexdump(printf, "Stack dump", tf, 256);
}

static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	const uint32_t eclass = __SHIFTOUT(ksi->ksi_trap, ESR_EC);

	printf("pid %d.%d (%s): signal %d (trap %#x) "
	    "@pc %#" PRIx64 ", addr %p, error=%s\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_trap,
	    tf->tf_regs.r_pc, ksi->ksi_addr, eclass_trapname(eclass));
	frame_dump(tf);
}
#endif

void do_trapsignal1(
#ifdef TRAP_SIGDEBUG
    const char *func,
    size_t line,
    struct trapframe *tf,
#endif
    struct lwp *l, int signo, int code, void *addr, int trap)
{
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trap = trap;
#ifdef TRAP_SIGDEBUG
	printf("%s, %zu: ", func, line);
	sigdebug(tf, &ksi);
#endif
	(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
}

bool
cpu_intr_p(void)
{
	uint64_t ncsw;
	int idepth;
	lwp_t *l;

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* XXX Copied from cpu.h. Looks incomplete - needs fixing. */
	if (ci->ci_cpl < IPL_VM)
		return false;
#endif

	l = curlwp;
	if (__predict_false(l->l_cpu == NULL)) {
		KASSERT(l == &lwp0);
		return false;
	}
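	/*
	 * The lwp may migrate to another CPU while we sample
	 * l_cpu->ci_intr_depth.  l_ncsw is incremented on every context
	 * switch, so if it changed while we were reading, the sampled
	 * depth may belong to a CPU we are no longer running on; retry
	 * until the sample is taken without an intervening switch.
	 */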
	do {
		ncsw = l->l_ncsw;
		__insn_barrier();
		idepth = l->l_cpu->ci_intr_depth;
		__insn_barrier();
	} while (__predict_false(ncsw != l->l_ncsw));

	return idepth > 0;
}