/*	$NetBSD: trap.c,v 1.28 2020/07/06 09:34:16 rin Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.28 2020/07/06 09:34:16 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/siginfo.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/ras.h>

#include <uvm/uvm_extern.h>

#include <powerpc/pcb.h>
#include <powerpc/userret.h>
#include <powerpc/psl.h>
#include <powerpc/instr.h>
#include <powerpc/altivec.h>	/* use same interface for SPE */

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>
#include <powerpc/booke/cpuvar.h>

#include <powerpc/fpu/fpu_extern.h>

#include <powerpc/db_machdep.h>
#include <ddb/db_interface.h>

#include <powerpc/trap.h>
#include <powerpc/booke/trap.h>
#include <powerpc/booke/pte.h>

void trap(enum ppc_booke_exceptions, struct trapframe *);

static const char trap_names[][8] = {
	[T_CRITIAL_INPUT] = "CRIT",
	[T_EXTERNAL_INPUT] = "EXT",
	[T_DECREMENTER] = "DECR",
	[T_FIXED_INTERVAL] = "FIT",
	[T_WATCHDOG] = "WDOG",
	[T_SYSTEM_CALL] = "SC",
	[T_MACHINE_CHECK] = "MCHK",
	[T_DSI] = "DSI",
	[T_ISI] = "ISI",
	[T_ALIGNMENT] = "ALN",
	[T_PROGRAM] = "PGM",
	[T_FP_UNAVAILABLE] = "FP",
	[T_AP_UNAVAILABLE] = "AP",
	[T_DATA_TLB_ERROR] = "DTLB",
	[T_INSTRUCTION_TLB_ERROR] = "ITLB",
	[T_DEBUG] = "DEBUG",
	[T_SPE_UNAVAILABLE] = "SPE",
	[T_EMBEDDED_FP_DATA] = "FPDATA",
	[T_EMBEDDED_FP_ROUND] = "FPROUND",
	[T_EMBEDDED_PERF_MONITOR] = "PERFMON",
	[T_AST] = "AST",
};

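/*
 * True if the exception was taken from user mode, i.e. the problem-state
 * bit (PSL_PR) was set in the saved SRR1.
 */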
static inline bool
usertrap_p(struct trapframe *tf)
{
	return (tf->tf_srr1 & PSL_PR) != 0;
}

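/*
 * Machine check.  No recovery is attempted: count the event and, for a
 * user-mode fault, prepare a SIGSEGV for the address saved in tf_mcar.
 */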
static int
mchk_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const bool usertrap = usertrap_p(tf);
	const vaddr_t faultva = tf->tf_mcar;
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	if (usertrap)
		ci->ci_ev_umchk.ev_count++;

	if (rv != 0 && usertrap) {
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}

	return rv;
}

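/*
 * Derive the access type from the exception syndrome register (ESR_ST is
 * set when the faulting access was a store) and pick the map to fault on:
 * the saved PSL_DS/PSL_IS bit selects the user vm_map when set and
 * kernel_map otherwise.
 */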
static inline vm_prot_t
get_faulttype(const struct trapframe * const tf)
{
	return VM_PROT_READ | (tf->tf_esr & ESR_ST ? VM_PROT_WRITE : 0);
}

static inline struct vm_map *
get_faultmap(const struct trapframe * const tf, register_t psl_mask)
{
	return (tf->tf_srr1 & psl_mask)
	    ? &curlwp->l_proc->p_vmspace->vm_map
	    : kernel_map;
}

/*
 * We could use pmap_pte_lookup but this is slightly faster since we
 * already have the segtab pointers in cpu_info.
 */
static inline pt_entry_t *
trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
{
	pmap_segtab_t ** const stps = &curcpu()->ci_pmap_kern_segtab;
	pmap_segtab_t * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stp == NULL))
		return NULL;
	pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
	if (__predict_false(ptep == NULL))
		return NULL;
	return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);
}

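/*
 * Common page-fault resolver for the DSI/ISI and TLB-miss paths.  User
 * faults go straight to uvm_fault(); kernel faults are refused from
 * interrupt context and run with pcb_onfault temporarily cleared so a
 * nested fault cannot silently take the copyin/copyout error path.
 */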
static int
pagefault(struct vm_map *map, vaddr_t va, vm_prot_t ftype, bool usertrap)
{
	struct lwp * const l = curlwp;
	int rv;

//	printf("%s(%p,%#lx,%u,%u)\n", __func__, map, va, ftype, usertrap);

	if (usertrap) {
		rv = uvm_fault(map, trunc_page(va), ftype);
		if (rv == 0)
			uvm_grow(l->l_proc, trunc_page(va));
		if (rv == EACCES)
			rv = EFAULT;
	} else {
		if (cpu_intr_p())
			return EFAULT;

		struct pcb * const pcb = lwp_getpcb(l);
		struct faultbuf * const fb = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (map != kernel_map) {
			if (rv == 0)
				uvm_grow(l->l_proc, trunc_page(va));
		}
		if (rv == EACCES)
			rv = EFAULT;
	}
	return rv;
}

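/*
 * Data storage interrupt.  A write fault on a writable page that is
 * still flagged PTE_UNMODIFIED is just modified-bit emulation: mark the
 * page modified, clear the flag, and refresh the TLB entry.  Anything
 * else goes through pagefault(), with a SIGSEGV prepared for user
 * faults that cannot be resolved.
 */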
static int
dsi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_udsi.ev_count++;
	else
		ci->ci_ev_kdsi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_DS);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((ftype & VM_PROT_WRITE)
	    && ((pte & (PTE_xW|PTE_UNMODIFIED)) == (PTE_xW|PTE_UNMODIFIED))) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
		}
		pte &= ~PTE_UNMODIFIED;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, ftype, usertrap);

	/*
	 * We can't get a MAPERR here since that's a different exception.
	 */
	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

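/*
 * Instruction storage interrupt.  If the PTE is merely PTE_UNSYNCED the
 * page has not been executed from yet: write back the D-cache, invalidate
 * the I-cache, grant execute permission, and update the TLB entry.
 * Otherwise fall back to pagefault() and post a SIGSEGV for user faults
 * that fail.
 */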
static int
isi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = trunc_page(tf->tf_srr0);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_isi.ev_count++;
	else
		ci->ci_ev_kisi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_IS);
	if (ptep == NULL)
		dump_trapframe(tf, NULL);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	if ((pte & PTE_UNSYNCED) == PTE_UNSYNCED) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		UVMHIST_LOG(pmapexechist,
		    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): %s",
		    tf->tf_srr0, pg, pa,
		    (VM_PAGEMD_EXECPAGE_P(mdpg)
			? "no syncicache (already execpage)"
			: "performed syncicache (now execpage)"));

		if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
			ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++;
			dcache_wb_page(pa);
			icache_inv_page(pa);
			pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
		}
		pte &= ~PTE_UNSYNCED;
		pte |= PTE_xX;
		*ptep = pte;

		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		UVMHIST_LOG(pmapexechist, "<- 0", 0,0,0,0);
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		/*
		 * We can't get a MAPERR here since
		 * that's a different exception.
		 */
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)tf->tf_srr0; /* not truncated */
	}
	UVMHIST_LOG(pmapexechist, "<- %d", rv, 0,0,0);
	return rv;
}

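/*
 * Data TLB error: the TLB-miss handler found no usable PTE, so resolve
 * the fault through pagefault() and, for user faults that fail, post a
 * SIGSEGV carrying the faulting data address.
 */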
static int
dtlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

#if 0
	/*
	 * This is what pte_load in trap_subr.S does for us.
	 */
	const pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	if (ptep != NULL && !usertrap && pte_valid_p(*ptep)) {
		tlb_update_addr(trunc_page(faultva), KERNEL_PID, *ptep, true);
		ci->ci_ev_tlbmiss_soft.ev_count++;
		return 0;
	}
#endif

	ci->ci_ev_dtlbmiss_hard.ev_count++;

//	printf("pagefault(%p,%#lx,%u,%u)", faultmap, faultva, ftype, usertrap);
	int rv = pagefault(faultmap, faultva, ftype, usertrap);
//	printf(": %d\n", rv);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

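/*
 * Instruction TLB error: same as the data case, but the fault is on the
 * instruction fetch address (srr0) with read/execute protection.
 */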
static int
itlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const vaddr_t faultva = tf->tf_srr0;
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

	ci->ci_ev_itlbmiss_hard.ev_count++;

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

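/*
 * SPE unavailable: load the SPE/vector state for the current user lwp
 * (the AltiVec interface is reused for SPE).  Without PPC_HAVE_SPE the
 * instruction is reported as illegal via SIGILL.
 */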
static int
spe_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();

	if (!usertrap_p(tf))
		return EPERM;

	ci->ci_ev_vec.ev_count++;

#ifdef PPC_HAVE_SPE
	vec_load();
	return 0;
#else
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	ksi->ksi_code = ILL_ILLOPC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return EPERM;
#endif
}

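/*
 * Emulate a few opcodes on behalf of user programs: lwsync is accepted
 * and skipped, and mfspr of PVR/PIR/SVR is satisfied by reading the
 * register for the caller.  Returns true if the instruction was handled.
 */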
static bool
emulate_opcode(struct trapframe *tf, ksiginfo_t *ksi)
{
	uint32_t opcode;
	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return false;

	if (opcode == OPC_LWSYNC)
		return true;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return true;
	}

	if (OPC_MFSPR_P(opcode, SPR_PIR)) {
		__asm ("mfspr %0, %1"
		    : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)])
		    : "n"(SPR_PIR));
		return true;
	}

	if (OPC_MFSPR_P(opcode, SPR_SVR)) {
		__asm ("mfspr %0,%1"
		    : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)])
		    : "n"(SPR_SVR));
		return true;
	}

	/*
	 * If we bothered to emulate FP, we would try to do so here.
	 */
	return false;
}

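/*
 * Program exception (illegal/privileged instruction or trap).  In order:
 * let RAS restart sequences skip their trap instruction, try
 * emulate_opcode(), try soft-float emulation for illegal opcodes, and
 * finally build a SIGILL or SIGTRAP for the process.
 */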
static int
pgm_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	UVMHIST_LOG(pmapexechist, " srr0/1=%#x/%#x esr=%#x pte=%#x",
	    tf->tf_srr0, tf->tf_srr1, tf->tf_esr,
	    *trap_pte_lookup(tf, trunc_page(tf->tf_srr0), PSL_IS));

	ci->ci_ev_pgm.ev_count++;

	if (tf->tf_esr & ESR_PTR) {
		struct proc *p = curlwp->l_proc;
		if (p->p_raslist != NULL
		    && ras_lookup(p, (void *)tf->tf_srr0) != (void *) -1) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	if (tf->tf_esr & (ESR_PIL|ESR_PPR)) {
		if (emulate_opcode(tf, ksi)) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	if (tf->tf_esr & ESR_PIL) {
		struct pcb * const pcb = lwp_getpcb(curlwp);
		if (__predict_false(!fpu_used_p(curlwp))) {
			memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
			fpu_mark_used(curlwp);
		}
		if (fpu_emulate(tf, &pcb->pcb_fpu, ksi)) {
			if (ksi->ksi_signo == 0) {
				ci->ci_ev_fpu.ev_count++;
				return 0;
			}
			return EFAULT;
		}
	}

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	if (tf->tf_esr & ESR_PIL) {
		ksi->ksi_code = ILL_ILLOPC;
	} else if (tf->tf_esr & ESR_PPR) {
		ksi->ksi_code = ILL_PRVOPC;
	} else if (tf->tf_esr & ESR_PTR) {
		ksi->ksi_signo = SIGTRAP;
		ksi->ksi_code = TRAP_BRKPT;
	} else {
		ksi->ksi_code = 0;
	}
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return rv;
}

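/*
 * Debug exception from user mode: acknowledge the event in DBSR, turn
 * off further debug events, and report SIGTRAP/TRAP_TRACE.
 */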
static int
debug_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	ci->ci_ev_debug.ev_count++;

	/*
	 * Ack the interrupt.
	 */
	mtspr(SPR_DBSR, tf->tf_esr);
	KASSERT(tf->tf_esr & (DBSR_IAC1|DBSR_IAC2|DBSR_BRT));
	KASSERT((tf->tf_srr1 & PSL_SE) == 0);

	/*
	 * Disable debug events
	 */
	mtspr(SPR_DBCR1, 0);
	mtspr(SPR_DBCR0, 0);

	/*
	 * Tell the debugger ...
	 */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_trap = EXC_TRC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	ksi->ksi_code = TRAP_TRACE;
	return rv;
}

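/*
 * Alignment exception.  No alignment fixup is attempted; user faults are
 * turned into a SIGILL whose code is derived from the ESR bits.
 */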
static int
ali_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_ali.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		ci->ci_ev_ali_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		if (tf->tf_esr & ESR_PIL)
			ksi->ksi_code = ILL_ILLOPC;
		else if (tf->tf_esr & ESR_PPR)
			ksi->ksi_code = ILL_PRVOPC;
		else if (tf->tf_esr & ESR_PTR)
			ksi->ksi_code = ILL_ILLTRP;
		else
			ksi->ksi_code = 0;
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

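/*
 * Embedded FP data exception: with SPE support this becomes a SIGFPE
 * whose code is derived from the vector state; otherwise the instruction
 * is reported as illegal.
 */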
static int
embedded_fp_data_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

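/*
 * Embedded FP round exception: handled the same way as the data
 * exception above.
 */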
static int
embedded_fp_round_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EDOM;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

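/*
 * Dump a trapframe through the given printf-like function, or through
 * the console printf if pr is NULL.
 */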
void
dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...))
{
	if (pr == NULL)
		pr = printf;
	(*pr)("trapframe %p (exc=%x srr0/1=%#lx/%#lx esr/dear=%#x/%#lx)\n",
	    tf, tf->tf_exc, tf->tf_srr0, tf->tf_srr1, tf->tf_esr, tf->tf_dear);
	(*pr)("lr =%08lx ctr=%08lx cr =%08x xer=%08x\n",
	    tf->tf_lr, tf->tf_ctr, tf->tf_cr, tf->tf_xer);
	for (u_int r = 0; r < 32; r += 4) {
		(*pr)("r%02u=%08lx r%02u=%08lx r%02u=%08lx r%02u=%08lx\n",
		    r+0, tf->tf_fixreg[r+0], r+1, tf->tf_fixreg[r+1],
		    r+2, tf->tf_fixreg[r+2], r+3, tf->tf_fixreg[r+3]);
	}
}

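/*
 * Hand a kernel-mode exception to DDB.  If kdb_trap() handles it, skip
 * the trapping instruction so execution can continue.
 */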
static bool
ddb_exception(struct trapframe *tf)
{
#if 0
	const register_t ddb_trapfunc = (uintptr_t) cpu_Debugger;
	if ((tf->tf_esr & ESR_PTR) == 0)
		return false;
	if (ddb_trapfunc <= tf->tf_srr0 && tf->tf_srr0 <= ddb_trapfunc+16) {
		register_t srr0 = tf->tf_srr0;
		if (kdb_trap(tf->tf_exc, tf)) {
			if (srr0 == tf->tf_srr0)
				tf->tf_srr0 += 4;
			return true;
		}
	}
	return false;
#else
#if 0
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	printf("CPL stack:");
	if (ci->ci_idepth >= 0) {
		for (u_int i = 0; i <= ci->ci_idepth; i++) {
			printf(" [%u]=%u", i, cpu->cpu_pcpls[i]);
		}
	}
	printf(" %u\n", ci->ci_cpl);
	dump_trapframe(tf, NULL);
#endif
	if (kdb_trap(tf->tf_exc, tf)) {
		tf->tf_srr0 += 4;
		return true;
	}
	return false;
#endif
}

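/*
 * If the current lwp has a pcb_onfault handler registered (as set up by
 * copyin/copyout and friends), rewrite the trapframe so the trap returns
 * into that handler with rv in r3, and clear the handler.
 */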
static bool
onfaulted(struct trapframe *tf, register_t rv)
{
	struct lwp * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	struct faultbuf * const fb = pcb->pcb_onfault;
	if (fb == NULL)
		return false;
	tf->tf_srr0 = fb->fb_pc;
	tf->tf_srr1 = fb->fb_msr;
	tf->tf_cr = fb->fb_cr;
	tf->tf_fixreg[1] = fb->fb_sp;
	tf->tf_fixreg[2] = fb->fb_r2;
	tf->tf_fixreg[3] = rv;
	pcb->pcb_onfault = NULL;
	return true;
}

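/*
 * Common trap entry point: perform some sanity checks, dispatch to the
 * per-exception handler above, and post-process the result.  Kernel
 * faults that cannot be resolved either return through pcb_onfault or
 * panic; user faults have the prepared signal delivered before returning
 * to user mode via userret().
 */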
void
trap(enum ppc_booke_exceptions trap_code, struct trapframe *tf)
{
	const bool usertrap = usertrap_p(tf);
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	ksiginfo_t ksi;
	int rv = EACCES;

	ci->ci_ev_traps.ev_count++;
	ci->ci_data.cpu_ntrap++;

	KASSERTMSG(!usertrap || tf == trapframe(l),
	    "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, trapframe(l));

#if 0
	if (trap_code != T_PROGRAM || usertrap)
		printf("trap(enter): %s (tf=%p, esr/dear=%#x/%#lx, srr0/1=%#lx/%#lx, lr=%#lx)\n",
		    trap_names[trap_code], tf, tf->tf_esr, tf->tf_dear,
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
#endif
#if 0
	if ((register_t)tf >= (register_t)l->l_addr + USPACE
	    || (register_t)tf < (register_t)l->l_addr + PAGE_SIZE) {
		printf("%s(entry): pid %d.%d (%s): invalid tf addr %p\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm, tf);
		dump_trapframe(tf, NULL);
		Debugger();
	}
#endif
#if 0
	if ((mfmsr() & PSL_CE) == 0) {
		printf("%s(entry): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], mfmsr());
		dump_trapframe(tf, NULL);
	}
#endif

	if ((VM_MAX_ADDRESS & 0x80000000) == 0
	    && usertrap && (tf->tf_fixreg[1] & 0x80000000)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid sp %#lx "
		    "(sprg1=%#jx)\n", __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_fixreg[1],
		    (uintmax_t)mfspr(SPR_SPRG1));
		dump_trapframe(tf, NULL);
		Debugger();
	}

	if (usertrap && (tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_srr1);
		dump_trapframe(tf, NULL);
		Debugger();
	}

	switch (trap_code) {
	case T_CRITIAL_INPUT:
	case T_EXTERNAL_INPUT:
	case T_DECREMENTER:
	case T_FIXED_INTERVAL:
	case T_WATCHDOG:
	case T_SYSTEM_CALL:
	default:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_MACHINE_CHECK:
		rv = mchk_exception(tf, &ksi);
		break;
	case T_DSI:
		rv = dsi_exception(tf, &ksi);
		break;
	case T_ISI:
		rv = isi_exception(tf, &ksi);
		break;
	case T_ALIGNMENT:
		rv = ali_exception(tf, &ksi);
		break;
	case T_SPE_UNAVAILABLE:
		rv = spe_exception(tf, &ksi);
		break;
	case T_PROGRAM:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = pgm_exception(tf, &ksi);
		break;
	case T_FP_UNAVAILABLE:
	case T_AP_UNAVAILABLE:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_DATA_TLB_ERROR:
		rv = dtlb_exception(tf, &ksi);
		break;
	case T_INSTRUCTION_TLB_ERROR:
		rv = itlb_exception(tf, &ksi);
		break;
	case T_DEBUG:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = debug_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_DATA:
		rv = embedded_fp_data_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_ROUND:
		rv = embedded_fp_round_exception(tf, &ksi);
		break;
	case T_EMBEDDED_PERF_MONITOR:
		//db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
		dump_trapframe(tf, NULL);
		rv = EPERM;
		break;
	case T_AST:
		KASSERT(usertrap);
		cpu_ast(l, ci);
		if ((VM_MAX_ADDRESS & 0x80000000) == 0
		    && (tf->tf_fixreg[1] & 0x80000000)) {
			printf("%s(ast-exit): pid %d.%d (%s): invalid sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf, NULL);
			Debugger();
		}
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf, NULL);
			Debugger();
		}
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf, NULL);
		}
#endif
		userret(l, tf);
		return;
	}
	if (!usertrap) {
		if (rv != 0) {
			if (!onfaulted(tf, rv)) {
				db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
				dump_trapframe(tf, NULL);
				panic("%s: pid %d.%d (%s): %s exception in kernel mode"
				    " (tf=%p, dear=%#lx, esr=%#x,"
				    " srr0/1=%#lx/%#lx)",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code], tf, tf->tf_dear,
				    tf->tf_esr, tf->tf_srr0, tf->tf_srr1);
			}
		}
#if 0
		if (tf->tf_fixreg[1] >= (register_t)l->l_addr + USPACE
		    || tf->tf_fixreg[1] < (register_t)l->l_addr + PAGE_SIZE) {
			printf("%s(exit): pid %d.%d (%s): invalid kern sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf, NULL);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			mtmsr(mfmsr()|PSL_CE);
			dump_trapframe(tf, NULL);
		}
#endif
	} else {
		if (rv == ENOMEM) {
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		if (rv != 0) {
			/*
			 * Only print a fatal trap if the signal will be
			 * uncaught.
			 */
			if (cpu_printfataltraps
			    && (p->p_slflag & PSL_TRACED) == 0
			    && !sigismember(&p->p_sigctx.ps_sigcatch,
				    ksi.ksi_signo)) {
				printf("%s: pid %d.%d (%s):"
				    " %s exception in user mode\n",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code]);
				if (cpu_printfataltraps > 1)
					dump_trapframe(tf, NULL);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
#ifdef DEBUG
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(exit): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf, NULL);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf, NULL);
		}
#endif
		userret(l, tf);
	}
}