/*	$NetBSD: trap.c,v 1.41 2024/09/08 10:16:04 andvar Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.41 2024/09/08 10:16:04 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/ras.h>
#include <sys/siginfo.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <uvm/uvm_extern.h>

#include <powerpc/altivec.h>		/* use same interface for SPE */
#include <powerpc/instr.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/trap.h>
#include <powerpc/userret.h>

#include <powerpc/fpu/fpu_extern.h>

#include <powerpc/booke/cpuvar.h>
#include <powerpc/booke/pte.h>
#include <powerpc/booke/spr.h>
#include <powerpc/booke/trap.h>

void trap(enum ppc_booke_exceptions, struct trapframe *);

static const char trap_names[][8] = {
	[T_CRITIAL_INPUT] = "CRIT",
	[T_EXTERNAL_INPUT] = "EXT",
	[T_DECREMENTER] = "DECR",
	[T_FIXED_INTERVAL] = "FIT",
	[T_WATCHDOG] = "WDOG",
	[T_SYSTEM_CALL] = "SC",
	[T_MACHINE_CHECK] = "MCHK",
	[T_DSI] = "DSI",
	[T_ISI] = "ISI",
	[T_ALIGNMENT] = "ALN",
	[T_PROGRAM] = "PGM",
	[T_FP_UNAVAILABLE] = "FP",
	[T_AP_UNAVAILABLE] = "AP",
	[T_DATA_TLB_ERROR] = "DTLB",
	[T_INSTRUCTION_TLB_ERROR] = "ITLB",
	[T_DEBUG] = "DEBUG",
	[T_SPE_UNAVAILABLE] = "SPE",
	[T_EMBEDDED_FP_DATA] = "FPDATA",
	[T_EMBEDDED_FP_ROUND] = "FPROUND",
	[T_EMBEDDED_PERF_MONITOR] = "PERFMON",
	[T_AST] = "AST",
};

static inline bool
usertrap_p(struct trapframe *tf)
{
	return (tf->tf_srr1 & PSL_PR) != 0;
}

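/*
 * Machine check handler.  For a user-mode machine check, bump the event
 * counter and fill in a SIGBUS (BUS_OBJERR) siginfo using the machine check
 * address saved in the trapframe.  EFAULT is always returned, so kernel-mode
 * machine checks are resolved by the onfault/panic logic in trap().
 */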
static int
mchk_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const bool usertrap = usertrap_p(tf);
	const vaddr_t faultva = tf->tf_mcar;
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	if (usertrap) {
		ci->ci_ev_umchk.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGBUS;
		ksi->ksi_trap = EXC_MCHK;
		ksi->ksi_addr = (void *)faultva;
		ksi->ksi_code = BUS_OBJERR;
	}

	return rv;
}

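/*
 * Derive the access type of a data fault from the ESR: a set ESR_ST (store)
 * bit means the faulting access was a write.
 */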
static inline vm_prot_t
get_faulttype(const struct trapframe * const tf)
{
	return VM_PROT_READ | (tf->tf_esr & ESR_ST ? VM_PROT_WRITE : 0);
}

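/*
 * Pick the map to fault against: if the saved MSR has the given address
 * space bit (PSL_DS or PSL_IS) set, the access was in the user address
 * space, otherwise it was against the kernel map.
 */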
static inline struct vm_map *
get_faultmap(const struct trapframe * const tf, register_t psl_mask)
{
	return (tf->tf_srr1 & psl_mask)
	    ? &curlwp->l_proc->p_vmspace->vm_map
	    : kernel_map;
}

/*
 * We could use pmap_pte_lookup but this is slightly faster since we already
 * have the segtab pointers in cpu_info.
 */
static inline pt_entry_t *
trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
{
	pmap_segtab_t ** const stbs = &curcpu()->ci_pmap_kern_segtab;
	pmap_segtab_t * const stb = stbs[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stb == NULL))
		return NULL;

	pmap_ptpage_t * const ppg = stb->seg_ppg[va >> SEGSHIFT];
	if (__predict_false(ppg == NULL))
		return NULL;
	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return ppg->ppg_ptes + pte_idx;
}

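/*
 * Common page fault handling for the DSI/ISI/TLB exceptions.  Kernel faults
 * are refused in interrupt context, and pcb_onfault is cleared around the
 * call to uvm_fault() so a recursive fault is not short-circuited by the
 * copy error recovery.  Successful faults on a user map also grow the stack
 * if needed.
 */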
static int
pagefault(struct vm_map *map, vaddr_t va, vm_prot_t ftype, bool usertrap)
{
	struct lwp * const l = curlwp;
	int rv;

//	printf("%s(%p,%#lx,%u,%u)\n", __func__, map, va, ftype, usertrap);

	if (usertrap) {
		rv = uvm_fault(map, trunc_page(va), ftype);
		if (rv == 0)
			uvm_grow(l->l_proc, trunc_page(va));
	} else {
		if (cpu_intr_p())
			return EFAULT;

		struct pcb * const pcb = lwp_getpcb(l);
		struct faultbuf * const fb = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (map != kernel_map) {
			if (rv == 0)
				uvm_grow(l->l_proc, trunc_page(va));
		}
	}
	return rv;
}

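/*
 * Convert a uvm_fault() error into trap siginfo: EINVAL becomes
 * SIGBUS/BUS_ADRERR, EACCES becomes SIGSEGV/SEGV_ACCERR, and anything else
 * becomes SIGSEGV/SEGV_MAPERR.
 */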
static void
vm_signal(int error, int trap, vaddr_t addr, ksiginfo_t *ksi)
{

	KSI_INIT_TRAP(ksi);
	switch (error) {
	case EINVAL:
		ksi->ksi_signo = SIGBUS;
		ksi->ksi_code = BUS_ADRERR;
		break;
	case EACCES:
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_code = SEGV_ACCERR;
		break;
	default:
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_code = SEGV_MAPERR;
		break;
	}
	ksi->ksi_trap = trap;
	ksi->ksi_addr = (void *)addr;
}

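/*
 * Data storage interrupt.  If this is a write to a writable page that is
 * merely tracking its modified state (PTE_xW|PTE_UNMODIFIED), mark the page
 * modified, update the PTE and the TLB, and return.  Anything else goes
 * through pagefault(); a failed user fault is turned into a signal via
 * vm_signal().
 */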
static int
dsi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_udsi.ev_count++;
	else
		ci->ci_ev_kdsi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_DS);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((ftype & VM_PROT_WRITE)
	    && ((pte & (PTE_xW|PTE_UNMODIFIED)) == (PTE_xW|PTE_UNMODIFIED))) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
		}
		pte &= ~PTE_UNMODIFIED;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, ftype, usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		vm_signal(rv, EXC_DSI, faultva, ksi);
	}
	return rv;
}

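/*
 * Instruction storage interrupt.  If the page has simply not been synced for
 * execution yet (PTE_UNSYNCED), write back the data cache, invalidate the
 * instruction cache, mark the page executable, and update the PTE/TLB.
 * Otherwise fault the page in with read/execute permission.
 */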
static int
isi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = trunc_page(tf->tf_srr0);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_isi.ev_count++;
	else
		ci->ci_ev_kisi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_IS);
	if (ptep == NULL)
		dump_trapframe(tf, NULL);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	if ((pte & PTE_UNSYNCED) == PTE_UNSYNCED) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

#ifdef UVMHIST
		if (VM_PAGEMD_EXECPAGE_P(mdpg))
			UVMHIST_LOG(pmapexechist,
			    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): "
			    "no syncicache (already execpage)",
			    tf->tf_srr0, (uintptr_t)pg, pa, 0);
		else
			UVMHIST_LOG(pmapexechist,
			    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): "
			    "performed syncicache (now execpage)",
			    tf->tf_srr0, (uintptr_t)pg, pa, 0);
#endif

		if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
			ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++;
			dcache_wb_page(pa);
			icache_inv_page(pa);
			pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
		}
		pte &= ~PTE_UNSYNCED;
		pte |= PTE_xX;
		*ptep = pte;

		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		UVMHIST_LOG(pmapexechist, "<- 0", 0,0,0,0);
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		vm_signal(rv, EXC_ISI, tf->tf_srr0, ksi);
	}
	UVMHIST_LOG(pmapexechist, "<- %d", rv, 0,0,0);
	return rv;
}

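/*
 * Data TLB error.  The block disabled below is what the low-level pte_load
 * code already does for us, so by the time we get here the fast reload did
 * not apply and the page has to be faulted in the slow way through
 * pagefault().
 */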
static int
dtlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

#if 0
	/*
	 * This is what pte_load in trap_subr.S does for us.
	 */
	const pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	if (ptep != NULL && !usertrap && pte_valid_p(*ptep)) {
		tlb_update_addr(trunc_page(faultva), KERNEL_PID, *ptep, true);
		ci->ci_ev_tlbmiss_soft.ev_count++;
		return 0;
	}
#endif

	ci->ci_ev_dtlbmiss_hard.ev_count++;

//	printf("pagefault(%p,%#lx,%u,%u)", faultmap, faultva, ftype, usertrap);
	int rv = pagefault(faultmap, faultva, ftype, usertrap);
//	printf(": %d\n", rv);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		vm_signal(rv, EXC_DSI, faultva, ksi);
	}
	return rv;
}

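/*
 * Instruction TLB error: fault the page in with read/execute permission and
 * signal the lwp if that fails for a user trap.
 */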
static int
itlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const vaddr_t faultva = tf->tf_srr0;
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

	ci->ci_ev_itlbmiss_hard.ev_count++;

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		vm_signal(rv, EXC_ISI, tf->tf_srr0, ksi);
	}
	return rv;
}

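/*
 * SPE/vector unavailable.  Kernel-mode use is rejected with EPERM; for user
 * traps the vector state is loaded if the kernel was built with SPE support,
 * otherwise the trap becomes SIGILL.
 */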
static int
spe_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();

	if (!usertrap_p(tf))
		return EPERM;

	ci->ci_ev_vec.ev_count++;

#ifdef PPC_HAVE_SPE
	vec_load();
	return 0;
#else
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	ksi->ksi_code = ILL_ILLOPC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return EPERM;
#endif
}

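/*
 * Try to emulate an instruction that raised a program exception: lwsync is
 * treated as a no-op, and mfspr of PVR, PIR or SVR is satisfied by reading
 * the real register.  Everything else is handed to emulate_mxmsr().
 */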
static bool
emulate_opcode(struct trapframe *tf, ksiginfo_t *ksi)
{
	uint32_t opcode;
	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return false;

	if (opcode == OPC_LWSYNC)
		return true;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return true;
	}

	if (OPC_MFSPR_P(opcode, SPR_PIR)) {
		__asm ("mfspr %0, %1"
		    : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)])
		    : "n"(SPR_PIR));
		return true;
	}

	if (OPC_MFSPR_P(opcode, SPR_SVR)) {
		__asm ("mfspr %0,%1"
		    : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)])
		    : "n"(SPR_SVR));
		return true;
	}

	return emulate_mxmsr(curlwp, tf, opcode);
}

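/*
 * Program exception: trap instructions (breakpoints and single step),
 * illegal or privileged instructions that can be emulated, and floating
 * point instructions handled by the soft-float code are dealt with here;
 * anything left over becomes SIGTRAP or SIGILL.
 */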
static int
pgm_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	UVMHIST_LOG(pmapexechist, " srr0/1=%#x/%#x esr=%#x pte=%#x",
	    tf->tf_srr0, tf->tf_srr1, tf->tf_esr,
	    *trap_pte_lookup(tf, trunc_page(tf->tf_srr0), PSL_IS));

	ci->ci_ev_pgm.ev_count++;

	KSI_INIT_TRAP(ksi);

	if (tf->tf_esr & ESR_PTR) {
		struct lwp * const l = curlwp;
		struct proc * const p = curlwp->l_proc;
		vaddr_t va = (vaddr_t)tf->tf_srr0;
		int error;

		/*
		 * Restore original instruction and clear BP.
		 */
		if (p->p_md.md_ss_addr[0] == va ||
		    p->p_md.md_ss_addr[1] == va) {
			error = ppc_sstep(l, 0);
			if (error != 0) {
				vm_signal(error, EXC_PGM /* XXX */, va, ksi);
				return error;
			}
			ksi->ksi_code = TRAP_TRACE;
		} else
			ksi->ksi_code = TRAP_BRKPT;

		if (p->p_raslist != NULL &&
		    ras_lookup(p, (void *)va) != (void *)-1) {
			tf->tf_srr0 += (ksi->ksi_code == TRAP_TRACE) ? 0 : 4;
			return 0;
		}
	}

	if (tf->tf_esr & (ESR_PIL|ESR_PPR)) {
		if (emulate_opcode(tf, ksi)) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	if (tf->tf_esr & ESR_PIL) {
		struct lwp * const l = curlwp;
		struct pcb * const pcb = lwp_getpcb(l);

		if (__predict_false(!fpu_used_p(l))) {
			memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
			fpu_mark_used(l);
		}
		if (fpu_emulate(tf, &pcb->pcb_fpu, ksi)) {
			if (ksi->ksi_signo == 0) {
				ci->ci_ev_fpu.ev_count++;
				return 0;
			}
			return EFAULT;
		}
	}

	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	if (tf->tf_esr & ESR_PIL) {
		ksi->ksi_code = ILL_ILLOPC;
	} else if (tf->tf_esr & ESR_PPR) {
		ksi->ksi_code = ILL_PRVOPC;
	} else if (tf->tf_esr & ESR_PTR) {
		ksi->ksi_signo = SIGTRAP;
	} else {
		ksi->ksi_code = 0;
	}
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return rv;
}

#if 0
static int
debug_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	ci->ci_ev_debug.ev_count++;

	/*
	 * Ack the interrupt.
	 */
	mtspr(SPR_DBSR, tf->tf_esr);
	KASSERT(tf->tf_esr & (DBSR_IAC1|DBSR_IAC2|DBSR_BRT));
	KASSERT((tf->tf_srr1 & PSL_SE) == 0);

	/*
	 * Disable debug events
	 */
	mtspr(SPR_DBCR1, 0);
	mtspr(SPR_DBCR0, 0);

	/*
	 * Tell the debugger ...
	 */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_trap = EXC_TRC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	ksi->ksi_code = TRAP_TRACE;
	return rv;
}
#endif

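/*
 * Alignment exception.  No emulation of misaligned accesses is done here;
 * user faults get a signal and kernel faults are left to the onfault/panic
 * logic in trap().
 */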
static int
ali_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_ali.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		ci->ci_ev_ali_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		if (tf->tf_esr & ESR_PIL)
			ksi->ksi_code = ILL_ILLOPC;
		else if (tf->tf_esr & ESR_PPR)
			ksi->ksi_code = ILL_PRVOPC;
		else if (tf->tf_esr & ESR_PTR)
			ksi->ksi_code = ILL_ILLTRP;
		else
			ksi->ksi_code = 0;
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

static int
embedded_fp_data_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

static int
embedded_fp_round_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EDOM;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

void
dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...))
{
	if (pr == NULL)
		pr = printf;
	(*pr)("trapframe %p (exc=%x srr0/1=%#lx/%#lx esr/dear=%#x/%#lx)\n",
	    tf, tf->tf_exc, tf->tf_srr0, tf->tf_srr1, tf->tf_esr, tf->tf_dear);
	(*pr)("lr =%08lx ctr=%08lx cr =%08x xer=%08x\n",
	    tf->tf_lr, tf->tf_ctr, tf->tf_cr, tf->tf_xer);
	for (u_int r = 0; r < 32; r += 4) {
		(*pr)("r%02u=%08lx r%02u=%08lx r%02u=%08lx r%02u=%08lx\n",
		    r+0, tf->tf_fixreg[r+0], r+1, tf->tf_fixreg[r+1],
		    r+2, tf->tf_fixreg[r+2], r+3, tf->tf_fixreg[r+3]);
	}
}

#ifdef DDB
static bool
ddb_exception(struct trapframe *tf)
{
#if 0
	const register_t ddb_trapfunc = (uintptr_t) cpu_Debugger;
	if ((tf->tf_esr & ESR_PTR) == 0)
		return false;
	if (ddb_trapfunc <= tf->tf_srr0 && tf->tf_srr0 <= ddb_trapfunc+16) {
		register_t srr0 = tf->tf_srr0;
		if (kdb_trap(tf->tf_exc, tf)) {
			if (srr0 == tf->tf_srr0)
				tf->tf_srr0 += 4;
			return true;
		}
	}
	return false;
#else
#if 0
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	printf("CPL stack:");
	if (ci->ci_idepth >= 0) {
		for (u_int i = 0; i <= ci->ci_idepth; i++) {
			printf(" [%u]=%u", i, cpu->cpu_pcpls[i]);
		}
	}
	printf(" %u\n", ci->ci_cpl);
	dump_trapframe(tf, NULL);
#endif
	if (kdb_trap(tf->tf_exc, tf)) {
		tf->tf_srr0 += 4;
		return true;
	}
	return false;
#endif
}
#endif /* DDB */

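/*
 * If the faulting code registered a fault recovery buffer in pcb_onfault,
 * rewind the trapframe to the saved state so execution resumes there with
 * the error code in r3.
 */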
static bool
onfaulted(struct trapframe *tf, register_t rv)
{
	struct lwp * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	struct faultbuf * const fb = pcb->pcb_onfault;
	if (fb == NULL)
		return false;
	tf->tf_srr0 = fb->fb_pc;
	tf->tf_srr1 = fb->fb_msr;
	tf->tf_cr = fb->fb_cr;
	tf->tf_fixreg[1] = fb->fb_sp;
	tf->tf_fixreg[2] = fb->fb_r2;
	tf->tf_fixreg[3] = rv;
	memcpy(&tf->tf_fixreg[13], fb->fb_fixreg, sizeof(fb->fb_fixreg));
	return true;
}

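/*
 * The C-level trap handler: sanity-check the trapframe, dispatch on the
 * exception code to the handlers above, and then either post any pending
 * signal (user traps) or recover via onfaulted() / panic (kernel traps).
 */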
void
trap(enum ppc_booke_exceptions trap_code, struct trapframe *tf)
{
	const bool usertrap = usertrap_p(tf);
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	ksiginfo_t ksi;
	int rv = EACCES;

	ci->ci_ev_traps.ev_count++;
	ci->ci_data.cpu_ntrap++;

	KASSERTMSG(!usertrap || tf == trapframe(l),
	    "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, trapframe(l));

#if 0
	if (trap_code != T_PROGRAM || usertrap)
		printf("trap(enter): %s (tf=%p, esr/dear=%#x/%#lx, srr0/1=%#lx/%#lx, lr=%#lx)\n",
		    trap_names[trap_code], tf, tf->tf_esr, tf->tf_dear,
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
#endif
#if 0
	if ((register_t)tf >= (register_t)l->l_addr + USPACE
	    || (register_t)tf < (register_t)l->l_addr + PAGE_SIZE) {
		printf("%s(entry): pid %d.%d (%s): invalid tf addr %p\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm, tf);
		dump_trapframe(tf, NULL);
		console_debugger();
	}
#endif
#if 0
	if ((mfmsr() & PSL_CE) == 0) {
		printf("%s(entry): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], mfmsr());
		dump_trapframe(tf, NULL);
	}
#endif

	if ((VM_MAX_ADDRESS & 0x80000000) == 0
	    && usertrap && (tf->tf_fixreg[1] & 0x80000000)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid sp %#lx "
		    "(sprg1=%#jx)\n", __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_fixreg[1],
		    (uintmax_t)mfspr(SPR_SPRG1));
		dump_trapframe(tf, NULL);
		console_debugger();
	}

	if (usertrap && (tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_srr1);
		dump_trapframe(tf, NULL);
		console_debugger();
	}

	switch (trap_code) {
	case T_CRITIAL_INPUT:
	case T_EXTERNAL_INPUT:
	case T_DEBUG:
	case T_DECREMENTER:
	case T_FIXED_INTERVAL:
	case T_WATCHDOG:
	case T_SYSTEM_CALL:
	default:
787 panic("trap: unexcepted trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
788 trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_MACHINE_CHECK:
		rv = mchk_exception(tf, &ksi);
		break;
	case T_DSI:
		rv = dsi_exception(tf, &ksi);
		break;
	case T_ISI:
		rv = isi_exception(tf, &ksi);
		break;
	case T_ALIGNMENT:
		rv = ali_exception(tf, &ksi);
		break;
	case T_SPE_UNAVAILABLE:
		rv = spe_exception(tf, &ksi);
		break;
	case T_PROGRAM:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = pgm_exception(tf, &ksi);
		break;
	case T_FP_UNAVAILABLE:
	case T_AP_UNAVAILABLE:
813 panic("trap: unexcepted trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
814 trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_DATA_TLB_ERROR:
		rv = dtlb_exception(tf, &ksi);
		break;
	case T_INSTRUCTION_TLB_ERROR:
		rv = itlb_exception(tf, &ksi);
		break;
#if 0
	case T_DEBUG:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = debug_exception(tf, &ksi);
		break;
#endif
	case T_EMBEDDED_FP_DATA:
		rv = embedded_fp_data_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_ROUND:
		rv = embedded_fp_round_exception(tf, &ksi);
		break;
	case T_EMBEDDED_PERF_MONITOR:
#ifdef DDB
		//db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
#endif
		dump_trapframe(tf, NULL);
		rv = EPERM;
		break;
	case T_AST:
		KASSERT(usertrap);
		cpu_ast(l, ci);
		if ((VM_MAX_ADDRESS & 0x80000000) == 0
		    && (tf->tf_fixreg[1] & 0x80000000)) {
			printf("%s(ast-exit): pid %d.%d (%s): invalid sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf, NULL);
			console_debugger();
		}
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf, NULL);
			console_debugger();
		}
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf, NULL);
		}
#endif
		userret(l, tf);
		return;
	}
	if (!usertrap) {
		if (rv != 0) {
			if (!onfaulted(tf, rv)) {
#ifdef DDB
				db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
#endif
				dump_trapframe(tf, NULL);
				panic("%s: pid %d.%d (%s): %s exception in kernel mode"
				    " (tf=%p, dear=%#lx, esr=%#x,"
				    " srr0/1=%#lx/%#lx)",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code], tf, tf->tf_dear,
				    tf->tf_esr, tf->tf_srr0, tf->tf_srr1);
			}
		}
#if 0
		if (tf->tf_fixreg[1] >= (register_t)l->l_addr + USPACE
		    || tf->tf_fixreg[1] < (register_t)l->l_addr + PAGE_SIZE) {
			printf("%s(exit): pid %d.%d (%s): invalid kern sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf, NULL);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			mtmsr(mfmsr()|PSL_CE);
			dump_trapframe(tf, NULL);
		}
#endif
	} else {
		if (rv == ENOMEM) {
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = 0;
		}
		if (rv != 0) {
			/*
			 * Only print a fatal trap if the signal will be
			 * uncaught.
			 */
			if (cpu_printfataltraps
			    && (p->p_slflag & PSL_TRACED) == 0
			    && !sigismember(&p->p_sigctx.ps_sigcatch,
				    ksi.ksi_signo)) {
				printf("%s: pid %d.%d (%s):"
				    " %s exception in user mode\n",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code]);
				if (cpu_printfataltraps > 1)
					dump_trapframe(tf, NULL);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
#ifdef DEBUG
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(exit): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf, NULL);
			console_debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf, NULL);
		}
#endif
		userret(l, tf);
	}
}