/* $NetBSD: trap.c,v 1.38 2020/10/15 23:15:36 rin Exp $ */
2
3 /*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33
34 __KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.38 2020/10/15 23:15:36 rin Exp $");
35
36 #include "opt_arm_intr_impl.h"
37 #include "opt_compat_netbsd32.h"
38 #include "opt_dtrace.h"
39
40 #include <sys/param.h>
41 #include <sys/kauth.h>
42 #include <sys/types.h>
43 #include <sys/atomic.h>
44 #include <sys/cpu.h>
45 #include <sys/evcnt.h>
46 #ifdef KDB
47 #include <sys/kdb.h>
48 #endif
49 #include <sys/proc.h>
50 #include <sys/systm.h>
51 #include <sys/signal.h>
52 #include <sys/signalvar.h>
53 #include <sys/siginfo.h>
54 #include <sys/xcall.h>
55
56 #ifdef ARM_INTR_IMPL
57 #include ARM_INTR_IMPL
58 #else
59 #error ARM_INTR_IMPL not defined
60 #endif
61
62 #ifndef ARM_IRQ_HANDLER
63 #error ARM_IRQ_HANDLER not defined
64 #endif
65
66 #include <aarch64/userret.h>
67 #include <aarch64/frame.h>
68 #include <aarch64/machdep.h>
69 #include <aarch64/armreg.h>
70 #include <aarch64/locore.h>
71 #include <aarch64/cpufunc.h>
72
73 #ifdef KDB
74 #include <machine/db_machdep.h>
75 #endif
76 #ifdef DDB
77 #include <ddb/db_output.h>
78 #include <machine/db_machdep.h>
79 #endif
80 #ifdef KDTRACE_HOOKS
81 #include <sys/dtrace_bsd.h>
82 #endif
83
#ifdef DDB
/* Nonzero: log a disassembly of each user instruction that draws SIGILL. */
int sigill_debug = 0;
#endif

#ifdef KDTRACE_HOOKS
/* Hook points filled in by the dtrace module when it attaches. */
dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
dtrace_trap_func_t dtrace_trap_func = NULL;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

/* Result of attempting to emulate a trapped userland instruction. */
enum emul_arm_result {
	EMUL_ARM_SUCCESS = 0,	/* handled (pc advanced or signal posted) */
	EMUL_ARM_UNKNOWN,	/* not an instruction we emulate */
	EMUL_ARM_FAULT,		/* user memory access failed; tf_far is set */
};
99
/*
 * Human-readable names for the ESR_ELx exception class (EC) codes,
 * indexed by EC value.  Entries not listed are NULL; eclass_trapname()
 * substitutes a generated "Unknown trap" string for those.
 */
const char * const trap_names[] = {
	[ESR_EC_UNKNOWN] = "Unknown Reason (Illegal Instruction)",
	[ESR_EC_SERROR] = "SError Interrupt",
	[ESR_EC_WFX] = "WFI or WFE instruction execution",
	[ESR_EC_ILL_STATE] = "Illegal Execution State",

	[ESR_EC_BTE_A64] = "Branch Target Exception",

	[ESR_EC_SYS_REG] = "MSR/MRS/SYS instruction",
	[ESR_EC_SVC_A64] = "SVC Instruction Execution",
	[ESR_EC_HVC_A64] = "HVC Instruction Execution",
	[ESR_EC_SMC_A64] = "SMC Instruction Execution",

	[ESR_EC_INSN_ABT_EL0] = "Instruction Abort (EL0)",
	[ESR_EC_INSN_ABT_EL1] = "Instruction Abort (EL1)",
	[ESR_EC_DATA_ABT_EL0] = "Data Abort (EL0)",
	[ESR_EC_DATA_ABT_EL1] = "Data Abort (EL1)",

	[ESR_EC_PC_ALIGNMENT] = "Misaligned PC",
	[ESR_EC_SP_ALIGNMENT] = "Misaligned SP",

	[ESR_EC_FP_ACCESS] = "Access to SIMD/FP Registers",
	[ESR_EC_FP_TRAP_A64] = "FP Exception",

	[ESR_EC_BRKPNT_EL0] = "Breakpoint Exception (EL0)",
	[ESR_EC_BRKPNT_EL1] = "Breakpoint Exception (EL1)",
	[ESR_EC_SW_STEP_EL0] = "Software Step (EL0)",
	[ESR_EC_SW_STEP_EL1] = "Software Step (EL1)",
	[ESR_EC_WTCHPNT_EL0] = "Watchpoint (EL0)",
	[ESR_EC_WTCHPNT_EL1] = "Watchpoint (EL1)",
	[ESR_EC_BKPT_INSN_A64] = "BKPT Instruction Execution",

	[ESR_EC_CP15_RT] = "A32: MCR/MRC access to CP15",
	[ESR_EC_CP15_RRT] = "A32: MCRR/MRRC access to CP15",
	[ESR_EC_CP14_RT] = "A32: MCR/MRC access to CP14",
	[ESR_EC_CP14_DT] = "A32: LDC/STC access to CP14",
	[ESR_EC_CP14_RRT] = "A32: MRRC access to CP14",
	[ESR_EC_SVC_A32] = "A32: SVC Instruction Execution",
	[ESR_EC_HVC_A32] = "A32: HVC Instruction Execution",
	[ESR_EC_SMC_A32] = "A32: SMC Instruction Execution",
	[ESR_EC_FPID] = "A32: MCR/MRC access to CP10",
	[ESR_EC_FP_TRAP_A32] = "A32: FP Exception",
	[ESR_EC_BKPT_INSN_A32] = "A32: BKPT Instruction Execution",
	[ESR_EC_VECTOR_CATCH] = "A32: Vector Catch Exception"
};
145
146 const char *
147 eclass_trapname(uint32_t eclass)
148 {
149 static char trapnamebuf[sizeof("Unknown trap 0x????????")];
150
151 if (eclass >= __arraycount(trap_names) || trap_names[eclass] == NULL) {
152 snprintf(trapnamebuf, sizeof(trapnamebuf),
153 "Unknown trap %#02x", eclass);
154 return trapnamebuf;
155 }
156 return trap_names[eclass];
157 }
158
/*
 * MD hook invoked on every return to userland; defers entirely to
 * the MI mi_userret().
 */
void
userret(struct lwp *l)
{
	mi_userret(l);
}
164
/*
 * Handle an AST posted against the current lwp just before it
 * returns to userland: count the trap, pay any pending profiling
 * tick (LP_OWEUPC), then run userret() so a context switch can
 * happen before the exception return.
 */
void
trap_doast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * allow to have a chance of context switch just prior to user
	 * exception return.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	/* curcpu() is only stable while preemption is disabled */
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	/* profiling tick owed from an earlier kernel-mode event? */
	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	userret(l);
}
193
/*
 * Synchronous exception taken from EL1 (kernel) on the EL1h stack:
 * kernel page faults, debugger/dtrace breakpoints, and lazy FPU
 * reload for kernel lwps granted FPU use.  Anything else is fatal.
 */
void
trap_el1h_sync(struct trapframe *tf)
{
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/*
	 * re-enable traps and interrupts -- but keep IRQs masked if
	 * they were masked when the exception was taken.
	 */
	if (!(tf->tf_spsr & SPSR_I))
		daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);
	else
		daif_enable(DAIF_D|DAIF_A);

#ifdef KDTRACE_HOOKS
	/* give dtrace first refusal on the trap */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, eclass))
		return;
#endif

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL1:
	case ESR_EC_DATA_ABT_EL1:
		/* kernel-mode fault (e.g. during copyin/copyout) */
		data_abort_handler(tf, eclass);
		break;

	case ESR_EC_BKPT_INSN_A64:
#ifdef KDTRACE_HOOKS
		/* BRK immediate 0x40d is the one dtrace plants for invop */
		if (__SHIFTOUT(esr, ESR_ISS) == 0x40d &&
		    dtrace_invop_jump_addr != 0) {
			(*dtrace_invop_jump_addr)(tf);
			break;
		}
		/* FALLTHROUGH */
#endif
	case ESR_EC_BRKPNT_EL1:
	case ESR_EC_SW_STEP_EL1:
	case ESR_EC_WTCHPNT_EL1:
#ifdef DDB
		/* hand kernel debug exceptions to the in-kernel debugger */
		if (eclass == ESR_EC_BRKPNT_EL1)
			kdb_trap(DB_TRAP_BREAKPOINT, tf);
		else if (eclass == ESR_EC_BKPT_INSN_A64)
			kdb_trap(DB_TRAP_BKPT_INSN, tf);
		else if (eclass == ESR_EC_WTCHPNT_EL1)
			kdb_trap(DB_TRAP_WATCHPOINT, tf);
		else if (eclass == ESR_EC_SW_STEP_EL1)
			kdb_trap(DB_TRAP_SW_STEP, tf);
		else
			kdb_trap(DB_TRAP_UNKNOWN, tf);
#else
		panic("No debugger in kernel");
#endif
		break;

	case ESR_EC_FP_ACCESS:
		/*
		 * Lazy FPU reload is legitimate for a kernel lwp that
		 * was granted FPU use (LW_SYSTEM_FPU); any other FP
		 * access from EL1 falls through to the fatal panic.
		 */
		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
			fpu_load(curlwp);
			break;
		}
		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
	case ESR_EC_PC_ALIGNMENT:
	case ESR_EC_SP_ALIGNMENT:
	case ESR_EC_ILL_STATE:
	case ESR_EC_BTE_A64:
	default:
		panic("Trap: fatal %s: pc=%016" PRIx64 " sp=%016" PRIx64
		    " esr=%08x", eclass_trapname(eclass), tf->tf_pc, tf->tf_sp,
		    esr);
		break;
	}
}
264
/*
 * There are some systems with different cache line sizes for each cpu.
 * Userland programs can be preempted between CPUs at any time, so in such
 * a system, the minimum cache line size must be visible to userland.
 */
/* The CTR_EL0 fields that are substituted when the read is emulated. */
#define CTR_EL0_USR_MASK \
    (CTR_EL0_DIC | CTR_EL0_IDC | CTR_EL0_DMIN_LINE | CTR_EL0_IMIN_LINE)
/* CTR_EL0 value presented to userland: worst case over all CPUs. */
uint64_t ctr_el0_usr __read_mostly;
273
274 static xcfunc_t
275 configure_cpu_traps0(void *arg1, void *arg2)
276 {
277 struct cpu_info * const ci = curcpu();
278 uint64_t sctlr;
279 uint64_t ctr_el0_raw = reg_ctr_el0_read();
280
281 #ifdef DEBUG_FORCE_TRAP_CTR_EL0
282 goto need_ctr_trap;
283 #endif
284
285 if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DMIN_LINE) >
286 __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) ||
287 (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IMIN_LINE) >
288 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE)))
289 goto need_ctr_trap;
290
291 if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DIC) == 1 &&
292 __SHIFTOUT(ctr_el0_usr, CTR_EL0_DIC) == 0) ||
293 (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 1 &&
294 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 0))
295 goto need_ctr_trap;
296
297 #if 0 /* XXX: To do or not to do */
298 /*
299 * IDC==0, but (LoC==0 || LoUIS==LoUU==0)?
300 * Would it be better to show IDC=1 to userland?
301 */
302 if (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 0 &&
303 __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 1)
304 goto need_ctr_trap;
305 #endif
306
307 return 0;
308
309 need_ctr_trap:
310 evcnt_attach_dynamic(&ci->ci_uct_trap, EVCNT_TYPE_MISC, NULL,
311 ci->ci_cpuname, "ctr_el0 trap");
312
313 /* trap CTR_EL0 access from EL0 on this cpu */
314 sctlr = reg_sctlr_el1_read();
315 sctlr &= ~SCTLR_UCT;
316 reg_sctlr_el1_write(sctlr);
317
318 return 0;
319 }
320
/*
 * Compute the CTR_EL0 value that is safe to expose to all userland
 * threads (minimum cache line sizes and worst-case DIC/IDC across
 * every CPU), then broadcast configure_cpu_traps0() so each CPU can
 * decide whether its EL0 CTR_EL0 reads must trap.
 */
void
configure_cpu_traps(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t where;

	/* remember minimum cache line size out of all CPUs */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uint64_t ctr_el0_cpu = ci->ci_id.ac_ctr;
		uint64_t clidr = ci->ci_id.ac_clidr;

		if (__SHIFTOUT(clidr, CLIDR_LOC) == 0 ||
		    (__SHIFTOUT(clidr, CLIDR_LOUIS) == 0 &&
		    __SHIFTOUT(clidr, CLIDR_LOUU) == 0)) {
			/* this means the same as IDC=1 */
			ctr_el0_cpu |= CTR_EL0_IDC;
		}

		/*
		 * if DIC==1, there is no need to icache sync. however,
		 * to calculate the minimum cacheline, in this case
		 * ICacheLine is treated as the maximum.
		 */
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu |= CTR_EL0_IMIN_LINE;

		/* Neoverse N1 erratum 1542419 */
		if (CPU_ID_NEOVERSEN1_P(ci->ci_id.ac_midr) &&
		    __SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu &= ~CTR_EL0_DIC;

		/* first CPU just seeds the accumulated value */
		if (cii == 0) {
			ctr_el0_usr = ctr_el0_cpu;
			continue;
		}

		/* keep minimum cache line size, and worst DIC/IDC */
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_DIC) | ~CTR_EL0_DIC;
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_IDC) | ~CTR_EL0_IDC;
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) {
			ctr_el0_usr &= ~CTR_EL0_DMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_DMIN_LINE;
		}
		/* a DIC=1 CPU had IMinLine forced to maximum above; skip it */
		if ((ctr_el0_cpu & CTR_EL0_DIC) == 0 &&
		    (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_IMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE))) {
			ctr_el0_usr &= ~CTR_EL0_IMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_IMIN_LINE;
		}
	}

	/* apply (or not) the UCT trap on each CPU and wait for completion */
	where = xc_broadcast(0,
	    (xcfunc_t)configure_cpu_traps0, NULL, NULL);
	xc_wait(where);
}
378
379 static enum emul_arm_result
380 emul_aarch64_insn(struct trapframe *tf)
381 {
382 uint32_t insn;
383
384 if (ufetch_32((uint32_t *)tf->tf_pc, &insn)) {
385 tf->tf_far = reg_far_el1_read();
386 return EMUL_ARM_FAULT;
387 }
388
389 LE32TOH(insn);
390 if ((insn & 0xffffffe0) == 0xd53b0020) {
391 /* mrs x?,ctr_el0 */
392 unsigned int Xt = insn & 31;
393 if (Xt != 31) { /* !xzr */
394 uint64_t ctr_el0 = reg_ctr_el0_read();
395 ctr_el0 &= ~CTR_EL0_USR_MASK;
396 ctr_el0 |= (ctr_el0_usr & CTR_EL0_USR_MASK);
397 tf->tf_reg[Xt] = ctr_el0;
398 }
399 curcpu()->ci_uct_trap.ev_count++;
400
401 } else {
402 return EMUL_ARM_UNKNOWN;
403 }
404
405 tf->tf_pc += 4;
406 return EMUL_ARM_SUCCESS;
407 }
408
/*
 * Synchronous exception from 64-bit userland (EL0): dispatch on the
 * ESR exception class into page-fault handling, the syscall path,
 * lazy FPU load, debug/alignment signals, and system-register
 * emulation.  Unrecognized classes become SIGILL.
 */
void
trap_el0_sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		/* user page fault */
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A64:
		/* system call; syscall path does its own userret handling */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;
	case ESR_EC_FP_ACCESS:
		/* lazy FPU context load */
		fpu_load(l);
		userret(l);
		break;
	case ESR_EC_FP_TRAP_A64:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_sp, esr);
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A64:
	case ESR_EC_BRKPNT_EL0:
	case ESR_EC_WTCHPNT_EL0:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SW_STEP_EL0:
		/* disable trace, and send trace trap */
		tf->tf_spsr &= ~SPSR_SS;
		do_trapsignal(l, SIGTRAP, TRAP_TRACE, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SYS_REG:
		/* trapped system-register access (e.g. CTR_EL0 under UCT) */
		switch (emul_aarch64_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	default:
	case ESR_EC_UNKNOWN:
unknown:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
496
/*
 * IRQ entry: bump the per-CPU interrupt depth around the MD IRQ
 * dispatcher and run soft interrupts on the way out.  Only D and A
 * are unmasked here; IRQ itself stays masked while dispatching.
 */
void
interrupt(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	/* NOTE(review): assumes the stack grows down from l_addr -- confirm */
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	ci->ci_intr_depth++;
	ARM_IRQ_HANDLER(tf);
	ci->ci_intr_depth--;

	cpu_dosoftints();
}
523
524 #ifdef COMPAT_NETBSD32
525
/*
 * 32-bit length Thumb instruction. See ARMv7 DDI0406A A6.3.
 */
#define THUMB_32BIT(hi) (((hi) & 0xe000) == 0xe000 && ((hi) & 0x1800))

/*
 * Fetch the A32 or T32 instruction at user address pc into *insn.
 * Returns the instruction size in bytes (2 for a 16-bit Thumb
 * encoding, otherwise 4), or -1 if user memory could not be read.
 */
int
fetch_arm_insn(uint64_t pc, uint64_t spsr, uint32_t *insn)
{

	/*
	 * Instructions are stored in little endian for BE8,
	 * only a valid binary format for ILP32EB. Therefore,
	 * we need byte-swapping before decoding on aarch64eb.
	 */

	/* THUMB? */
	if (spsr & SPSR_A32_T) {
		uint16_t *p = (uint16_t *)(pc & ~1UL); /* XXX */
		uint16_t hi, lo;

		/* first (or only) halfword */
		if (ufetch_16(p, &hi))
			return -1;
		LE16TOH(hi);

		if (!THUMB_32BIT(hi)) {
			/* 16-bit Thumb instruction */
			*insn = hi;
			return 2;
		}

		/* 32-bit Thumb instruction: fetch the second halfword */
		if (ufetch_16(p + 1, &lo))
			return -1;
		LE16TOH(lo);

		*insn = ((uint32_t)hi << 16) | lo;
		return 4;
	}

	/* A32: one 32-bit word */
	if (ufetch_32((uint32_t *)pc, insn))
		return -1;
	LE32TOH(*insn);

	return 4;
}
571
572 static bool
573 arm_cond_match(uint32_t insn, uint64_t spsr)
574 {
575 bool invert = (insn >> 28) & 1;
576 bool match;
577
578 switch (insn >> 29) {
579 case 0: /* EQ or NE */
580 match = spsr & SPSR_Z;
581 break;
582 case 1: /* CS/HI or CC/LO */
583 match = spsr & SPSR_C;
584 break;
585 case 2: /* MI or PL */
586 match = spsr & SPSR_N;
587 break;
588 case 3: /* VS or VC */
589 match = spsr & SPSR_V;
590 break;
591 case 4: /* HI or LS */
592 match = ((spsr & (SPSR_C | SPSR_Z)) == SPSR_C);
593 break;
594 case 5: /* GE or LT */
595 match = (!(spsr & SPSR_N) == !(spsr & SPSR_V));
596 break;
597 case 6: /* GT or LE */
598 match = !(spsr & SPSR_Z) &&
599 (!(spsr & SPSR_N) == !(spsr & SPSR_V));
600 break;
601 case 7: /* AL */
602 match = true;
603 break;
604 }
605 return (!match != !invert);
606 }
607
/* local prototype; presumably provided by MD atomic code -- TODO confirm */
uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);

/*
 * Emulate the deprecated A32 SWP (word) / SWPB (byte) instruction:
 * atomically swap register Rm with the user memory addressed by Rn,
 * storing the old memory value into Rd.  Returns 0 on success, or
 * EFAULT with tf->tf_far set (bad user address, or unaligned SWP).
 */
static int
emul_arm_swp(uint32_t insn, struct trapframe *tf)
{
	struct faultbuf fb;
	vaddr_t vaddr;
	uint32_t val;
	int Rn, Rd, Rm, error;

	/* register fields of the SWP/SWPB encoding */
	Rn = __SHIFTOUT(insn, 0x000f0000);
	Rd = __SHIFTOUT(insn, 0x0000f000);
	Rm = __SHIFTOUT(insn, 0x0000000f);

	vaddr = tf->tf_reg[Rn] & 0xffffffff;
	val = tf->tf_reg[Rm];

	/* fault if insn is swp, and unaligned access */
	if ((insn & 0x00400000) == 0 && (vaddr & 3) != 0) {
		tf->tf_far = vaddr;
		return EFAULT;
	}

	/* vaddr will always point to userspace, since it has only 32bit */
	if ((error = cpu_set_onfault(&fb)) == 0) {
		/* PAN would fault the direct user access; lift it briefly */
		if (aarch64_pan_enabled)
			reg_pan_write(0); /* disable PAN */
		if (insn & 0x00400000) {
			/* swpb */
			val = atomic_swap_8((uint8_t *)vaddr, val);
		} else {
			/* swp */
			val = atomic_swap_32((uint32_t *)vaddr, val);
		}
		cpu_unset_onfault();
		tf->tf_reg[Rd] = val;
	} else {
		/* the swap faulted; report the faulting address */
		tf->tf_far = reg_far_el1_read();
	}
	/* re-enable PAN on both the success and fault paths */
	if (aarch64_pan_enabled)
		reg_pan_write(1); /* enable PAN */
	return error;
}
651
652 static enum emul_arm_result
653 emul_thumb_insn(struct trapframe *tf, uint32_t insn, int insn_size)
654 {
655 /* T32-16bit or 32bit instructions */
656 switch (insn_size) {
657 case 2:
658 /* Breakpoint used by GDB */
659 if (insn == 0xdefe) {
660 do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
661 (void *)tf->tf_pc, 0);
662 return EMUL_ARM_SUCCESS;
663 }
664 /* XXX: some T32 IT instruction deprecated should be emulated */
665 break;
666 case 4:
667 break;
668 default:
669 return EMUL_ARM_FAULT;
670 }
671 return EMUL_ARM_UNKNOWN;
672 }
673
/*
 * Emulate an A32/T32 instruction that raised an "Unknown" exception
 * from 32-bit userland: GDB breakpoints, the deprecated SWP/SWPB,
 * and the ARMv6 user-accessible CP15 c7 barrier operations.
 * tf_far is primed from FAR_EL1 up front so that EMUL_ARM_FAULT
 * consumers always have a faulting address to report.
 */
static enum emul_arm_result
emul_arm_insn(struct trapframe *tf)
{
	uint32_t insn;
	int insn_size;

	insn_size = fetch_arm_insn(tf->tf_pc, tf->tf_spsr, &insn);
	tf->tf_far = reg_far_el1_read();

	/* Thumb mode is handled separately */
	if (tf->tf_spsr & SPSR_A32_T)
		return emul_thumb_insn(tf, insn, insn_size);
	if (insn_size != 4)
		return EMUL_ARM_FAULT;

	/* Breakpoint used by GDB */
	if (insn == 0xe6000011 || insn == 0xe7ffdefe) {
		do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
		    (void *)tf->tf_pc, 0);
		return EMUL_ARM_SUCCESS;
	}

	/* Unconditional instruction extension space? */
	if ((insn & 0xf0000000) == 0xf0000000)
		goto unknown_insn;

	/* swp,swpb */
	if ((insn & 0x0fb00ff0) == 0x01000090) {
		if (arm_cond_match(insn, tf->tf_spsr)) {
			if (emul_arm_swp(insn, tf) != 0)
				return EMUL_ARM_FAULT;
		}
		/* condition false: treat as an executed no-op */
		goto emulated;
	}

	/*
	 * Emulate ARMv6 instructions with cache operations
	 * register (c7), that can be used in user mode.
	 */
	switch (insn & 0x0fff0fff) {
	case 0x0e070f95:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c5, 4
			 * (flush prefetch buffer)
			 */
			__asm __volatile("isb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070f9a:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 4
			 * (data synchronization barrier)
			 */
			__asm __volatile("dsb sy" ::: "memory");
		}
		goto emulated;
	case 0x0e070fba:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 5
			 * (data memory barrier)
			 */
			__asm __volatile("dmb sy" ::: "memory");
		}
		goto emulated;
	default:
		break;
	}

 unknown_insn:
	/* unknown, or unsupported instruction */
	return EMUL_ARM_UNKNOWN;

 emulated:
	/* step past the emulated instruction */
	tf->tf_pc += insn_size;
	return EMUL_ARM_SUCCESS;
}
752 #endif /* COMPAT_NETBSD32 */
753
/*
 * Synchronous exception from 32-bit (AArch32) userland.  Mirrors
 * trap_el0_sync(), but additionally emulates legacy A32/T32
 * instructions via emul_arm_insn().  Without COMPAT_NETBSD32 every
 * class falls through to SIGILL.
 */
void
trap_el0_32sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
#ifdef COMPAT_NETBSD32
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		/* user page fault */
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A32:
		/* 32-bit system call */
		(*l->l_proc->p_md.md_syscall)(tf);
		break;

	case ESR_EC_FP_ACCESS:
		/* lazy FPU context load */
		fpu_load(l);
		userret(l);
		break;

	case ESR_EC_FP_TRAP_A32:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN,
		    (void *)tf->tf_reg[13], esr); /* sp is r13 on AArch32 */
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A32:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_UNKNOWN:
		/* try to emulate deprecated/legacy A32 instructions */
		switch (emul_arm_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	case ESR_EC_CP15_RT:
	case ESR_EC_CP15_RRT:
	case ESR_EC_CP14_RT:
	case ESR_EC_CP14_DT:
	case ESR_EC_CP14_RRT:
unknown:
#endif /* COMPAT_NETBSD32 */
	default:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}
843
/*
 * Exception vector entries this port treats as fatal (EL1t traps,
 * FIQs, SErrors): each generated handler just panics with its own
 * function name.
 */
#define bad_trap_panic(trapfunc) \
void \
trapfunc(struct trapframe *tf) \
{ \
	panic("%s", __func__); \
}
bad_trap_panic(trap_el1t_sync)
bad_trap_panic(trap_el1t_irq)
bad_trap_panic(trap_el1t_fiq)
bad_trap_panic(trap_el1t_error)
bad_trap_panic(trap_el1h_fiq)
bad_trap_panic(trap_el1h_error)
bad_trap_panic(trap_el0_fiq)
bad_trap_panic(trap_el0_error)
bad_trap_panic(trap_el0_32fiq)
bad_trap_panic(trap_el0_32error)
860
/*
 * Rewind the trapframe to the state saved by cpu_set_onfault(): the
 * callee-saved registers, SP and the saved LR (used as the resume PC)
 * are restored, and x0 is set to val so the protected code appears to
 * return val from cpu_set_onfault().
 */
void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb, int val)
{
	tf->tf_reg[19] = fb->fb_reg[FB_X19];
	tf->tf_reg[20] = fb->fb_reg[FB_X20];
	tf->tf_reg[21] = fb->fb_reg[FB_X21];
	tf->tf_reg[22] = fb->fb_reg[FB_X22];
	tf->tf_reg[23] = fb->fb_reg[FB_X23];
	tf->tf_reg[24] = fb->fb_reg[FB_X24];
	tf->tf_reg[25] = fb->fb_reg[FB_X25];
	tf->tf_reg[26] = fb->fb_reg[FB_X26];
	tf->tf_reg[27] = fb->fb_reg[FB_X27];
	tf->tf_reg[28] = fb->fb_reg[FB_X28];
	tf->tf_reg[29] = fb->fb_reg[FB_X29];
	tf->tf_sp = fb->fb_reg[FB_SP];
	tf->tf_pc = fb->fb_reg[FB_LR];
	tf->tf_reg[0] = val;
}
879
#ifdef TRAP_SIGDEBUG
/*
 * Print all trapframe registers (two per output line) plus esr/far,
 * then hexdump the 256 bytes starting at the trapframe itself.
 */
static void
frame_dump(const struct trapframe *tf)
{
	const struct reg *r = &tf->tf_regs;

	printf("trapframe %p\n", tf);
	for (size_t i = 0; i < __arraycount(r->r_reg); i++) {
		/* index into " \n": newline after every second register */
		printf(" r%.2zu %#018" PRIx64 "%c", i, r->r_reg[i],
		    " \n"[i && (i & 1) == 0]);
	}

	printf("\n");
	printf("   sp %#018" PRIx64 "   pc %#018" PRIx64 "\n",
	    r->r_sp, r->r_pc);
	printf(" spsr %#018" PRIx64 " tpidr %#018" PRIx64 "\n",
	    r->r_spsr, r->r_tpidr);
	printf("  esr %#018" PRIx64 "  far %#018" PRIx64 "\n",
	    tf->tf_esr, tf->tf_far);

	printf("\n");
	hexdump(printf, "Stack dump", tf, 256);
}
903
/*
 * Log a one-line summary of a pending trap signal (pid/lid, signal
 * number, trap class, faulting pc/address), then dump the frame.
 */
static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	const uint32_t eclass = __SHIFTOUT(ksi->ksi_trap, ESR_EC);

	printf("pid %d.%d (%s): signal %d (trap %#x) "
	    "@pc %#" PRIx64 ", addr %p, error=%s\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_trap,
	    tf->tf_regs.r_pc, ksi->ksi_addr, eclass_trapname(eclass));
	frame_dump(tf);
}
#endif
917 #endif
918
/*
 * Deliver a trap signal to lwp l: build a trap-style ksiginfo and
 * hand it to the process's e_trapsignal hook.  Under TRAP_SIGDEBUG
 * the caller's location and a register dump are logged first (the
 * extra func/line/tf arguments are presumably supplied by a
 * do_trapsignal() wrapper macro -- not visible in this file).
 */
void do_trapsignal1(
#ifdef TRAP_SIGDEBUG
    const char *func,
    size_t line,
    struct trapframe *tf,
#endif
    struct lwp *l, int signo, int code, void *addr, int trap)
{
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trap = trap;
#ifdef TRAP_SIGDEBUG
	printf("%s, %zu: ", func, line);
	sigdebug(tf, &ksi);
#endif
	(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
}
940
/*
 * Return true if we are currently running in interrupt context.
 * The l_ncsw retry loop re-samples if the lwp migrated to another
 * CPU while ci_intr_depth was being read, so the depth reported is
 * always from a CPU this lwp actually ran on.
 */
bool
cpu_intr_p(void)
{
	uint64_t ncsw;
	int idepth;
	lwp_t *l;

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* XXX Copied from cpu.h. Looks incomplete - needs fixing. */
	/* NOTE(review): "ci" is not declared here; this branch cannot compile */
	if (ci->ci_cpl < IPL_VM)
		return false;
#endif

	l = curlwp;
	/* early boot: lwp0 may not yet be bound to a CPU */
	if (__predict_false(l->l_cpu == NULL)) {
		KASSERT(l == &lwp0);
		return false;
	}
	do {
		ncsw = l->l_ncsw;
		__insn_barrier();
		idepth = l->l_cpu->ci_intr_depth;
		__insn_barrier();
	} while (__predict_false(ncsw != l->l_ncsw));

	return idepth > 0;
}
968