/* $NetBSD: trap.c,v 1.39 2020/10/22 07:23:24 skrll Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

32 #include <sys/cdefs.h>
33
34 __KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.39 2020/10/22 07:23:24 skrll Exp $");
35
36 #include "opt_arm_intr_impl.h"
37 #include "opt_compat_netbsd32.h"
38 #include "opt_dtrace.h"
39
40 #include <sys/param.h>
41 #include <sys/kauth.h>
42 #include <sys/types.h>
43 #include <sys/atomic.h>
44 #include <sys/cpu.h>
45 #include <sys/evcnt.h>
46 #ifdef KDB
47 #include <sys/kdb.h>
48 #endif
49 #include <sys/proc.h>
50 #include <sys/systm.h>
51 #include <sys/signal.h>
52 #include <sys/signalvar.h>
53 #include <sys/siginfo.h>
54 #include <sys/xcall.h>
55
56 #ifdef ARM_INTR_IMPL
57 #include ARM_INTR_IMPL
58 #else
59 #error ARM_INTR_IMPL not defined
60 #endif
61
62 #ifndef ARM_IRQ_HANDLER
63 #error ARM_IRQ_HANDLER not defined
64 #endif
65
66 #include <arm/cpufunc.h>
67
68 #include <aarch64/userret.h>
69 #include <aarch64/frame.h>
70 #include <aarch64/machdep.h>
71 #include <aarch64/armreg.h>
72 #include <aarch64/locore.h>
73
74 #ifdef KDB
75 #include <machine/db_machdep.h>
76 #endif
77 #ifdef DDB
78 #include <ddb/db_output.h>
79 #include <machine/db_machdep.h>
80 #endif
81 #ifdef KDTRACE_HOOKS
82 #include <sys/dtrace_bsd.h>
83 #endif
84
85 #ifdef DDB
86 int sigill_debug = 0;
87 #endif
88
89 #ifdef KDTRACE_HOOKS
90 dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
91 dtrace_trap_func_t dtrace_trap_func = NULL;
92 int (*dtrace_invop_jump_addr)(struct trapframe *);
93 #endif
94
/*
 * Result codes returned by the in-kernel instruction emulators
 * (emul_aarch64_insn(), emul_arm_insn() and their helpers).
 */
enum emul_arm_result {
	EMUL_ARM_SUCCESS = 0,	/* instruction was handled */
	EMUL_ARM_UNKNOWN,	/* instruction not recognized/emulated */
	EMUL_ARM_FAULT,		/* user-memory access faulted */
};
100
/*
 * Human-readable names for ESR exception classes, indexed by the
 * ESR_EC_* code.  Gaps in the table are NULL; eclass_trapname() falls
 * back to a generic "Unknown trap" string for those.
 */
const char * const trap_names[] = {
	[ESR_EC_UNKNOWN] = "Unknown Reason (Illegal Instruction)",
	[ESR_EC_SERROR] = "SError Interrupt",
	[ESR_EC_WFX] = "WFI or WFE instruction execution",
	[ESR_EC_ILL_STATE] = "Illegal Execution State",

	[ESR_EC_BTE_A64] = "Branch Target Exception",

	[ESR_EC_SYS_REG] = "MSR/MRS/SYS instruction",
	[ESR_EC_SVC_A64] = "SVC Instruction Execution",
	[ESR_EC_HVC_A64] = "HVC Instruction Execution",
	[ESR_EC_SMC_A64] = "SMC Instruction Execution",

	[ESR_EC_INSN_ABT_EL0] = "Instruction Abort (EL0)",
	[ESR_EC_INSN_ABT_EL1] = "Instruction Abort (EL1)",
	[ESR_EC_DATA_ABT_EL0] = "Data Abort (EL0)",
	[ESR_EC_DATA_ABT_EL1] = "Data Abort (EL1)",

	[ESR_EC_PC_ALIGNMENT] = "Misaligned PC",
	[ESR_EC_SP_ALIGNMENT] = "Misaligned SP",

	[ESR_EC_FP_ACCESS] = "Access to SIMD/FP Registers",
	[ESR_EC_FP_TRAP_A64] = "FP Exception",

	[ESR_EC_BRKPNT_EL0] = "Breakpoint Exception (EL0)",
	[ESR_EC_BRKPNT_EL1] = "Breakpoint Exception (EL1)",
	[ESR_EC_SW_STEP_EL0] = "Software Step (EL0)",
	[ESR_EC_SW_STEP_EL1] = "Software Step (EL1)",
	[ESR_EC_WTCHPNT_EL0] = "Watchpoint (EL0)",
	[ESR_EC_WTCHPNT_EL1] = "Watchpoint (EL1)",
	[ESR_EC_BKPT_INSN_A64] = "BKPT Instruction Execution",

	/* AArch32 (COMPAT_NETBSD32) exception classes */
	[ESR_EC_CP15_RT] = "A32: MCR/MRC access to CP15",
	[ESR_EC_CP15_RRT] = "A32: MCRR/MRRC access to CP15",
	[ESR_EC_CP14_RT] = "A32: MCR/MRC access to CP14",
	[ESR_EC_CP14_DT] = "A32: LDC/STC access to CP14",
	[ESR_EC_CP14_RRT] = "A32: MRRC access to CP14",
	[ESR_EC_SVC_A32] = "A32: SVC Instruction Execution",
	[ESR_EC_HVC_A32] = "A32: HVC Instruction Execution",
	[ESR_EC_SMC_A32] = "A32: SMC Instruction Execution",
	[ESR_EC_FPID] = "A32: MCR/MRC access to CP10",
	[ESR_EC_FP_TRAP_A32] = "A32: FP Exception",
	[ESR_EC_BKPT_INSN_A32] = "A32: BKPT Instruction Execution",
	[ESR_EC_VECTOR_CATCH] = "A32: Vector Catch Exception"
};
146
147 const char *
148 eclass_trapname(uint32_t eclass)
149 {
150 static char trapnamebuf[sizeof("Unknown trap 0x????????")];
151
152 if (eclass >= __arraycount(trap_names) || trap_names[eclass] == NULL) {
153 snprintf(trapnamebuf, sizeof(trapnamebuf),
154 "Unknown trap %#02x", eclass);
155 return trapnamebuf;
156 }
157 return trap_names[eclass];
158 }
159
/*
 * Hook run on every return to user mode; defers to the MI
 * mi_userret() for AST/signal processing.
 */
void
userret(struct lwp *l)
{
	mi_userret(l);
}
165
/*
 * Handle an AST (asynchronous software trap) just before returning to
 * user mode: account the trap, post any owed profiling tick, and run
 * userret().
 */
void
trap_doast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * allow to have a chance of context switch just prior to user
	 * exception return.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	/* ASTs are expected to be taken at IPL_NONE */
	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	/* deferred profiling tick owed to this lwp? */
	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	userret(l);
}
194
/*
 * Synchronous exception taken from EL1 (kernel mode, SP_EL1).
 * Survivable cases are kernel-mode aborts, debug traps and in-kernel
 * FPU use; anything else is a fatal panic.
 */
void
trap_el1h_sync(struct trapframe *tf)
{
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* re-enable traps and interrupts */
	if (!(tf->tf_spsr & SPSR_I))
		daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);
	else
		/* IRQs were masked at the time of the trap; keep them so */
		daif_enable(DAIF_D|DAIF_A);

#ifdef KDTRACE_HOOKS
	/* give dtrace first crack; a nonzero return means it consumed it */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, eclass))
		return;
#endif

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL1:
	case ESR_EC_DATA_ABT_EL1:
		data_abort_handler(tf, eclass);
		break;

	case ESR_EC_BKPT_INSN_A64:
#ifdef KDTRACE_HOOKS
		/* ISS 0x40d marks a dtrace-planted brk instruction */
		if (__SHIFTOUT(esr, ESR_ISS) == 0x40d &&
		    dtrace_invop_jump_addr != 0) {
			(*dtrace_invop_jump_addr)(tf);
			break;
		}
		/* FALLTHROUGH */
#endif
	case ESR_EC_BRKPNT_EL1:
	case ESR_EC_SW_STEP_EL1:
	case ESR_EC_WTCHPNT_EL1:
#ifdef DDB
		if (eclass == ESR_EC_BRKPNT_EL1)
			kdb_trap(DB_TRAP_BREAKPOINT, tf);
		else if (eclass == ESR_EC_BKPT_INSN_A64)
			kdb_trap(DB_TRAP_BKPT_INSN, tf);
		else if (eclass == ESR_EC_WTCHPNT_EL1)
			kdb_trap(DB_TRAP_WATCHPOINT, tf);
		else if (eclass == ESR_EC_SW_STEP_EL1)
			kdb_trap(DB_TRAP_SW_STEP, tf);
		else
			kdb_trap(DB_TRAP_UNKNOWN, tf);
#else
		panic("No debugger in kernel");
#endif
		break;

	case ESR_EC_FP_ACCESS:
		/* kernel lwps may use the FPU when LW_SYSTEM_FPU is set */
		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
			fpu_load(curlwp);
			break;
		}
		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
	case ESR_EC_PC_ALIGNMENT:
	case ESR_EC_SP_ALIGNMENT:
	case ESR_EC_ILL_STATE:
	case ESR_EC_BTE_A64:
	default:
		panic("Trap: fatal %s: pc=%016" PRIx64 " sp=%016" PRIx64
		    " esr=%08x", eclass_trapname(eclass), tf->tf_pc, tf->tf_sp,
		    esr);
		break;
	}
}
265
/*
 * There are some systems with different cache line sizes for each cpu.
 * Userland programs can be preempted between CPUs at any time, so in such
 * a system, the minimum cache line size must be visible to userland.
 */
/* CTR_EL0 fields that are virtualized for EL0 via trap-and-emulate */
#define CTR_EL0_USR_MASK \
	(CTR_EL0_DIC | CTR_EL0_IDC | CTR_EL0_DMIN_LINE | CTR_EL0_IMIN_LINE)
/* system-wide CTR_EL0 value shown to userland; set in configure_cpu_traps() */
uint64_t ctr_el0_usr __read_mostly;
274
275 static xcfunc_t
276 configure_cpu_traps0(void *arg1, void *arg2)
277 {
278 struct cpu_info * const ci = curcpu();
279 uint64_t sctlr;
280 uint64_t ctr_el0_raw = reg_ctr_el0_read();
281
282 #ifdef DEBUG_FORCE_TRAP_CTR_EL0
283 goto need_ctr_trap;
284 #endif
285
286 if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DMIN_LINE) >
287 __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) ||
288 (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IMIN_LINE) >
289 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE)))
290 goto need_ctr_trap;
291
292 if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DIC) == 1 &&
293 __SHIFTOUT(ctr_el0_usr, CTR_EL0_DIC) == 0) ||
294 (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 1 &&
295 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 0))
296 goto need_ctr_trap;
297
298 #if 0 /* XXX: To do or not to do */
299 /*
300 * IDC==0, but (LoC==0 || LoUIS==LoUU==0)?
301 * Would it be better to show IDC=1 to userland?
302 */
303 if (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 0 &&
304 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 1)
305 goto need_ctr_trap;
306 #endif
307
308 return 0;
309
310 need_ctr_trap:
311 evcnt_attach_dynamic(&ci->ci_uct_trap, EVCNT_TYPE_MISC, NULL,
312 ci->ci_cpuname, "ctr_el0 trap");
313
314 /* trap CTR_EL0 access from EL0 on this cpu */
315 sctlr = reg_sctlr_el1_read();
316 sctlr &= ~SCTLR_UCT;
317 reg_sctlr_el1_write(sctlr);
318
319 return 0;
320 }
321
/*
 * Compute the CTR_EL0 value that userland is allowed to see — the
 * minimum cache line sizes and the weakest DIC/IDC across all CPUs —
 * then have every cpu decide whether it must trap EL0 CTR_EL0 reads.
 */
void
configure_cpu_traps(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t where;

	/* remember minimum cache line size out of all CPUs */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uint64_t ctr_el0_cpu = ci->ci_id.ac_ctr;
		uint64_t clidr = ci->ci_id.ac_clidr;

		if (__SHIFTOUT(clidr, CLIDR_LOC) == 0 ||
		    (__SHIFTOUT(clidr, CLIDR_LOUIS) == 0 &&
		    __SHIFTOUT(clidr, CLIDR_LOUU) == 0)) {
			/* this means the same as IDC=1 */
			ctr_el0_cpu |= CTR_EL0_IDC;
		}

		/*
		 * if DIC==1, there is no need to icache sync. however,
		 * to calculate the minimum cacheline, in this case
		 * ICacheLine is treated as the maximum.
		 */
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu |= CTR_EL0_IMIN_LINE;

		/* Neoverse N1 erratum 1542419 */
		if (CPU_ID_NEOVERSEN1_P(ci->ci_id.ac_midr) &&
		    __SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu &= ~CTR_EL0_DIC;

		/* first cpu seen: its value is the starting point */
		if (cii == 0) {
			ctr_el0_usr = ctr_el0_cpu;
			continue;
		}

		/* keep minimum cache line size, and worst DIC/IDC */
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_DIC) | ~CTR_EL0_DIC;
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_IDC) | ~CTR_EL0_IDC;
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) {
			ctr_el0_usr &= ~CTR_EL0_DMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_DMIN_LINE;
		}
		/* a DIC=1 cpu's IMIN_LINE was forced to max above; skip it */
		if ((ctr_el0_cpu & CTR_EL0_DIC) == 0 &&
		    (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_IMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE))) {
			ctr_el0_usr &= ~CTR_EL0_IMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_IMIN_LINE;
		}
	}

	/* run configure_cpu_traps0() on every cpu and wait for them all */
	where = xc_broadcast(0,
	    (xcfunc_t)configure_cpu_traps0, NULL, NULL);
	xc_wait(where);
}
379
380 static enum emul_arm_result
381 emul_aarch64_insn(struct trapframe *tf)
382 {
383 uint32_t insn;
384
385 if (ufetch_32((uint32_t *)tf->tf_pc, &insn)) {
386 tf->tf_far = reg_far_el1_read();
387 return EMUL_ARM_FAULT;
388 }
389
390 LE32TOH(insn);
391 if ((insn & 0xffffffe0) == 0xd53b0020) {
392 /* mrs x?,ctr_el0 */
393 unsigned int Xt = insn & 31;
394 if (Xt != 31) { /* !xzr */
395 uint64_t ctr_el0 = reg_ctr_el0_read();
396 ctr_el0 &= ~CTR_EL0_USR_MASK;
397 ctr_el0 |= (ctr_el0_usr & CTR_EL0_USR_MASK);
398 tf->tf_reg[Xt] = ctr_el0;
399 }
400 curcpu()->ci_uct_trap.ev_count++;
401
402 } else {
403 return EMUL_ARM_UNKNOWN;
404 }
405
406 tf->tf_pc += 4;
407 return EMUL_ARM_SUCCESS;
408 }
409
/*
 * Synchronous exception taken from 64-bit EL0: page faults, syscalls,
 * lazy FPU, alignment and debug traps, and emulated system-register
 * accesses.  Every path that returns to user mode runs userret().
 */
void
trap_el0_sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A64:
		/* system call; the md_syscall hook handles userret itself */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;
	case ESR_EC_FP_ACCESS:
		/* lazy FPU: load this lwp's FP state and retry */
		fpu_load(l);
		userret(l);
		break;
	case ESR_EC_FP_TRAP_A64:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_sp, esr);
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A64:
	case ESR_EC_BRKPNT_EL0:
	case ESR_EC_WTCHPNT_EL0:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SW_STEP_EL0:
		/* disable trace, and send trace trap */
		tf->tf_spsr &= ~SPSR_SS;
		do_trapsignal(l, SIGTRAP, TRAP_TRACE, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SYS_REG:
		/* CTR_EL0 reads may trap here; see configure_cpu_traps() */
		switch (emul_aarch64_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	default:
	case ESR_EC_UNKNOWN:
	unknown:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
497
/*
 * Common hardware-interrupt entry: dispatch to the configured
 * interrupt-controller handler, then run pending soft interrupts.
 */
void
interrupt(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	/* cheap overflow check: sp must still be above the lwp area */
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	/* track nesting so cpu_intr_p() can detect interrupt context */
	ci->ci_intr_depth++;
	ARM_IRQ_HANDLER(tf);
	ci->ci_intr_depth--;

	cpu_dosoftints();
}
524
#ifdef COMPAT_NETBSD32

/*
 * 32-bit length Thumb instruction. See ARMv7 DDI0406A A6.3.
 */
/* first halfword 0b111xx... with xx != 00 introduces a 32-bit encoding */
#define THUMB_32BIT(hi) (((hi) & 0xe000) == 0xe000 && ((hi) & 0x1800))
531
/*
 * Fetch the user A32/T32 instruction at pc into *insn.
 * Returns the instruction size in bytes (2 for a 16-bit Thumb
 * encoding, otherwise 4), or -1 if the user-memory fetch faulted.
 */
int
fetch_arm_insn(uint64_t pc, uint64_t spsr, uint32_t *insn)
{

	/*
	 * Instructions are stored in little endian for BE8,
	 * only a valid binary format for ILP32EB. Therefore,
	 * we need byte-swapping before decoding on aarch64eb.
	 */

	/* THUMB? */
	if (spsr & SPSR_A32_T) {
		uint16_t *p = (uint16_t *)(pc & ~1UL); /* XXX */
		uint16_t hi, lo;

		if (ufetch_16(p, &hi))
			return -1;
		LE16TOH(hi);

		if (!THUMB_32BIT(hi)) {
			/* 16-bit Thumb instruction */
			*insn = hi;
			return 2;
		}

		/* 32-bit Thumb instruction */
		if (ufetch_16(p + 1, &lo))
			return -1;
		LE16TOH(lo);

		*insn = ((uint32_t)hi << 16) | lo;
		return 4;
	}

	/* classic 32-bit ARM encoding */
	if (ufetch_32((uint32_t *)pc, insn))
		return -1;
	LE32TOH(*insn);

	return 4;
}
572
573 static bool
574 arm_cond_match(uint32_t insn, uint64_t spsr)
575 {
576 bool invert = (insn >> 28) & 1;
577 bool match;
578
579 switch (insn >> 29) {
580 case 0: /* EQ or NE */
581 match = spsr & SPSR_Z;
582 break;
583 case 1: /* CS/HI or CC/LO */
584 match = spsr & SPSR_C;
585 break;
586 case 2: /* MI or PL */
587 match = spsr & SPSR_N;
588 break;
589 case 3: /* VS or VC */
590 match = spsr & SPSR_V;
591 break;
592 case 4: /* HI or LS */
593 match = ((spsr & (SPSR_C | SPSR_Z)) == SPSR_C);
594 break;
595 case 5: /* GE or LT */
596 match = (!(spsr & SPSR_N) == !(spsr & SPSR_V));
597 break;
598 case 6: /* GT or LE */
599 match = !(spsr & SPSR_Z) &&
600 (!(spsr & SPSR_N) == !(spsr & SPSR_V));
601 break;
602 case 7: /* AL */
603 match = true;
604 break;
605 }
606 return (!match != !invert);
607 }
608
/* no prototype visible in the headers included here, so declare it locally */
uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);

/*
 * Emulate the deprecated A32 swp/swpb instructions with an atomic swap
 * on the user address.  Returns 0 on success, or EFAULT with tf_far
 * set to the faulting address.
 */
static int
emul_arm_swp(uint32_t insn, struct trapframe *tf)
{
	struct faultbuf fb;
	vaddr_t vaddr;
	uint32_t val;
	int Rn, Rd, Rm, error;

	/* decode "swp{b} Rd, Rm, [Rn]" register fields */
	Rn = __SHIFTOUT(insn, 0x000f0000);
	Rd = __SHIFTOUT(insn, 0x0000f000);
	Rm = __SHIFTOUT(insn, 0x0000000f);

	vaddr = tf->tf_reg[Rn] & 0xffffffff;
	val = tf->tf_reg[Rm];

	/* fault if insn is swp, and unaligned access */
	if ((insn & 0x00400000) == 0 && (vaddr & 3) != 0) {
		tf->tf_far = vaddr;
		return EFAULT;
	}

	/* vaddr will always point to userspace, since it has only 32bit */
	if ((error = cpu_set_onfault(&fb)) == 0) {
		if (aarch64_pan_enabled)
			reg_pan_write(0); /* disable PAN */
		if (insn & 0x00400000) {
			/* swpb */
			val = atomic_swap_8((uint8_t *)vaddr, val);
		} else {
			/* swp */
			val = atomic_swap_32((uint32_t *)vaddr, val);
		}
		cpu_unset_onfault();
		tf->tf_reg[Rd] = val;
	} else {
		/* the swap faulted; report the faulting address */
		tf->tf_far = reg_far_el1_read();
	}
	/* re-enable PAN on both the success and the fault path */
	if (aarch64_pan_enabled)
		reg_pan_write(1); /* enable PAN */
	return error;
}
652
653 static enum emul_arm_result
654 emul_thumb_insn(struct trapframe *tf, uint32_t insn, int insn_size)
655 {
656 /* T32-16bit or 32bit instructions */
657 switch (insn_size) {
658 case 2:
659 /* Breakpoint used by GDB */
660 if (insn == 0xdefe) {
661 do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
662 (void *)tf->tf_pc, 0);
663 return EMUL_ARM_SUCCESS;
664 }
665 /* XXX: some T32 IT instruction deprecated should be emulated */
666 break;
667 case 4:
668 break;
669 default:
670 return EMUL_ARM_FAULT;
671 }
672 return EMUL_ARM_UNKNOWN;
673 }
674
/*
 * Handle an AArch32 instruction that trapped as "Unknown Reason":
 * GDB breakpoints, the deprecated swp/swpb, and the ARMv6 user-mode
 * CP15 c7 barrier operations.  On success the pc is advanced past the
 * emulated instruction.
 */
static enum emul_arm_result
emul_arm_insn(struct trapframe *tf)
{
	uint32_t insn;
	int insn_size;

	insn_size = fetch_arm_insn(tf->tf_pc, tf->tf_spsr, &insn);
	/* latch the fault address for the caller's possible SIGSEGV */
	tf->tf_far = reg_far_el1_read();

	if (tf->tf_spsr & SPSR_A32_T)
		return emul_thumb_insn(tf, insn, insn_size);
	if (insn_size != 4)
		return EMUL_ARM_FAULT;

	/* Breakpoint used by GDB */
	if (insn == 0xe6000011 || insn == 0xe7ffdefe) {
		do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
		    (void *)tf->tf_pc, 0);
		return EMUL_ARM_SUCCESS;
	}

	/* Unconditional instruction extension space? */
	if ((insn & 0xf0000000) == 0xf0000000)
		goto unknown_insn;

	/* swp,swpb */
	if ((insn & 0x0fb00ff0) == 0x01000090) {
		if (arm_cond_match(insn, tf->tf_spsr)) {
			if (emul_arm_swp(insn, tf) != 0)
				return EMUL_ARM_FAULT;
		}
		/* condition false: the instruction is a no-op; skip it */
		goto emulated;
	}

	/*
	 * Emulate ARMv6 instructions with cache operations
	 * register (c7), that can be used in user mode.
	 */
	switch (insn & 0x0fff0fff) {
	case 0x0e070f95:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c5, 4
			 * (flush prefetch buffer)
			 */
			__asm __volatile("isb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070f9a:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 4
			 * (data synchronization barrier)
			 */
			__asm __volatile("dsb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070fba:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 5
			 * (data memory barrier)
			 */
			__asm __volatile("dmb sy" ::: "memory");
		}
		goto emulated;
	default:
		break;
	}

unknown_insn:
	/* unknown, or unsupported instruction */
	return EMUL_ARM_UNKNOWN;

emulated:
	/* step past the instruction we handled */
	tf->tf_pc += insn_size;
	return EMUL_ARM_SUCCESS;
}
753 #endif /* COMPAT_NETBSD32 */
754
/*
 * Synchronous exception taken from 32-bit (AArch32) EL0.  Only built
 * out fully with COMPAT_NETBSD32; otherwise every class falls through
 * to the SIGILL default.
 */
void
trap_el0_32sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
#ifdef COMPAT_NETBSD32
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A32:
		/* 32-bit system call */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;

	case ESR_EC_FP_ACCESS:
		/* lazy FPU: load this lwp's FP state and retry */
		fpu_load(l);
		userret(l);
		break;

	case ESR_EC_FP_TRAP_A32:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN,
		    (void *)tf->tf_reg[13], esr); /* sp is r13 on AArch32 */
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A32:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_UNKNOWN:
		/* try to emulate deprecated/compat A32 instructions */
		switch (emul_arm_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	case ESR_EC_CP15_RT:
	case ESR_EC_CP15_RRT:
	case ESR_EC_CP14_RT:
	case ESR_EC_CP14_DT:
	case ESR_EC_CP14_RRT:
unknown:
#endif /* COMPAT_NETBSD32 */
	default:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
844
/*
 * Generate panic handlers for exception vectors that must never be
 * reached (EL1t entries, FIQs and SErrors).
 */
#define bad_trap_panic(trapfunc) \
void \
trapfunc(struct trapframe *tf) \
{ \
	panic("%s", __func__); \
}
bad_trap_panic(trap_el1t_sync)
bad_trap_panic(trap_el1t_irq)
bad_trap_panic(trap_el1t_fiq)
bad_trap_panic(trap_el1t_error)
bad_trap_panic(trap_el1h_fiq)
bad_trap_panic(trap_el1h_error)
bad_trap_panic(trap_el0_fiq)
bad_trap_panic(trap_el0_error)
bad_trap_panic(trap_el0_32fiq)
bad_trap_panic(trap_el0_32error)
861
/*
 * Rewind the trapframe to the state captured by cpu_set_onfault():
 * restore the callee-saved registers, sp and the saved return address,
 * and place val in x0 so the interrupted onfault caller sees it as the
 * return value.
 */
void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb, int val)
{
	tf->tf_reg[19] = fb->fb_reg[FB_X19];
	tf->tf_reg[20] = fb->fb_reg[FB_X20];
	tf->tf_reg[21] = fb->fb_reg[FB_X21];
	tf->tf_reg[22] = fb->fb_reg[FB_X22];
	tf->tf_reg[23] = fb->fb_reg[FB_X23];
	tf->tf_reg[24] = fb->fb_reg[FB_X24];
	tf->tf_reg[25] = fb->fb_reg[FB_X25];
	tf->tf_reg[26] = fb->fb_reg[FB_X26];
	tf->tf_reg[27] = fb->fb_reg[FB_X27];
	tf->tf_reg[28] = fb->fb_reg[FB_X28];
	tf->tf_reg[29] = fb->fb_reg[FB_X29];
	tf->tf_sp = fb->fb_reg[FB_SP];
	/* resume at the saved link register */
	tf->tf_pc = fb->fb_reg[FB_LR];
	tf->tf_reg[0] = val;
}
880
#ifdef TRAP_SIGDEBUG
/*
 * Dump the full register set and the memory at the trapframe for
 * TRAP_SIGDEBUG diagnostics.
 */
static void
frame_dump(const struct trapframe *tf)
{
	const struct reg *r = &tf->tf_regs;

	printf("trapframe %p\n", tf);
	for (size_t i = 0; i < __arraycount(r->r_reg); i++) {
		/*
		 * " \n"[...] picks the separator: a newline after each
		 * even-numbered register except r00, a space otherwise.
		 */
		printf(" r%.2zu %#018" PRIx64 "%c", i, r->r_reg[i],
		    " \n"[i && (i & 1) == 0]);
	}

	printf("\n");
	printf("   sp %#018" PRIx64 "    pc %#018" PRIx64 "\n",
	    r->r_sp, r->r_pc);
	printf(" spsr %#018" PRIx64 " tpidr %#018" PRIx64 "\n",
	    r->r_spsr, r->r_tpidr);
	printf("  esr %#018" PRIx64 "   far %#018" PRIx64 "\n",
	    tf->tf_esr, tf->tf_far);

	printf("\n");
	/* 256 bytes starting at the trapframe itself */
	hexdump(printf, "Stack dump", tf, 256);
}
904
/*
 * Print a one-line summary of the signal about to be delivered,
 * followed by a full frame dump (TRAP_SIGDEBUG only).
 */
static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	/* ksi_trap carries the ESR; extract the exception class from it */
	const uint32_t eclass = __SHIFTOUT(ksi->ksi_trap, ESR_EC);

	printf("pid %d.%d (%s): signal %d (trap %#x) "
	    "@pc %#" PRIx64 ", addr %p, error=%s\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_trap,
	    tf->tf_regs.r_pc, ksi->ksi_addr, eclass_trapname(eclass));
	frame_dump(tf);
}
#endif
919
/*
 * Build a trap ksiginfo and deliver it to lwp l via the process's
 * emulation hook.  Presumably reached through a do_trapsignal()
 * wrapper macro that supplies func/line/tf when TRAP_SIGDEBUG is
 * enabled — confirm in the MD header.
 */
void do_trapsignal1(
#ifdef TRAP_SIGDEBUG
    const char *func,
    size_t line,
    struct trapframe *tf,
#endif
    struct lwp *l, int signo, int code, void *addr, int trap)
{
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trap = trap;
#ifdef TRAP_SIGDEBUG
	/* log the origin of the signal before delivering it */
	printf("%s, %zu: ", func, line);
	sigdebug(tf, &ksi);
#endif
	(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
}
941
/*
 * Return true if we are currently executing in (hardware) interrupt
 * context, judged by the cpu's ci_intr_depth.
 */
bool
cpu_intr_p(void)
{
	uint64_t ncsw;
	int idepth;
	lwp_t *l;

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* XXX Copied from cpu.h. Looks incomplete - needs fixing. */
	/*
	 * NOTE(review): 'ci' is not declared in this scope, so this
	 * block cannot compile as-is if __HAVE_PIC_FAST_SOFTINTS is
	 * ever defined — confirm and fix before enabling that option.
	 */
	if (ci->ci_cpl < IPL_VM)
		return false;
#endif

	l = curlwp;
	/* very early in boot lwp0 may not yet be bound to a cpu */
	if (__predict_false(l->l_cpu == NULL)) {
		KASSERT(l == &lwp0);
		return false;
	}
	/*
	 * Re-sample ci_intr_depth until the lwp's context-switch count
	 * is stable, so a migration to another cpu mid-read does not
	 * hand us a stale cpu's interrupt depth.
	 */
	do {
		ncsw = l->l_ncsw;
		__insn_barrier();
		idepth = l->l_cpu->ci_intr_depth;
		__insn_barrier();
	} while (__predict_false(ncsw != l->l_ncsw));

	return idepth > 0;
}
969