/*	$NetBSD: trap.c,v 1.9 2011/06/13 21:12:50 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_sa.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.9 2011/06/13 21:12:50 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/siginfo.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#ifdef KERN_SA
#include <sys/savar.h>
#endif
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ras.h>

#include <uvm/uvm_extern.h>

#include <powerpc/pcb.h>
#include <powerpc/userret.h>
#include <powerpc/psl.h>
#include <powerpc/instr.h>
#include <powerpc/altivec.h>		/* use same interface for SPE */

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>
#include <powerpc/booke/cpuvar.h>

#include <powerpc/db_machdep.h>
#include <ddb/db_interface.h>

#include <powerpc/trap.h>
#include <powerpc/booke/trap.h>
#include <powerpc/booke/pte.h>

void trap(enum ppc_booke_exceptions, struct trapframe *);
static void dump_trapframe(const struct trapframe *);

static const char trap_names[][8] = {
	[T_CRITIAL_INPUT] = "CRIT",
	[T_EXTERNAL_INPUT] = "EXT",
	[T_DECREMENTER] = "DECR",
	[T_FIXED_INTERVAL] = "FIT",
	[T_WATCHDOG] = "WDOG",
	[T_SYSTEM_CALL] = "SC",
	[T_MACHINE_CHECK] = "MCHK",
	[T_DSI] = "DSI",
	[T_ISI] = "ISI",
	[T_ALIGNMENT] = "ALN",
	[T_PROGRAM] = "PGM",
	[T_FP_UNAVAILABLE] = "FP",
	[T_AP_UNAVAILABLE] = "AP",
	[T_DATA_TLB_ERROR] = "DTLB",
	[T_INSTRUCTION_TLB_ERROR] = "ITLB",
	[T_DEBUG] = "DEBUG",
	[T_SPE_UNAVAILABLE] = "SPE",
	[T_EMBEDDED_FP_DATA] = "FPDATA",
	[T_EMBEDDED_FP_ROUND] = "FPROUND",
	[T_EMBEDDED_PERF_MONITOR] = "PERFMON",
	[T_AST] = "AST",
};

static inline bool
usertrap_p(struct trapframe *tf)
{
	return (tf->tf_srr1 & PSL_PR) != 0;
}

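/*
 * Machine check exception.  No recovery is attempted: count the event and,
 * for a user-mode fault, prepare a SIGSEGV at the address in the MCAR.
 */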
static int
mchk_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const bool usertrap = usertrap_p(tf);
	const vaddr_t faultva = tf->tf_mcar;
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	if (usertrap)
		ci->ci_ev_umchk.ev_count++;

	if (rv != 0 && usertrap) {
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}

	return rv;
}

static inline vm_prot_t
get_faulttype(const struct trapframe * const tf)
{
	return VM_PROT_READ | (tf->tf_esr & ESR_ST ? VM_PROT_WRITE : 0);
}

static inline struct vm_map *
get_faultmap(const struct trapframe * const tf, register_t psl_mask)
{
	return (tf->tf_srr1 & psl_mask)
	    ? &curlwp->l_proc->p_vmspace->vm_map
	    : kernel_map;
}

/*
 * We could use pmap_pte_lookup() but this is slightly faster since we
 * already have the segtab pointers in cpu_info.
 */
static inline pt_entry_t *
trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
{
	struct pmap_segtab ** const stps = &curcpu()->ci_pmap_kern_segtab;
	struct pmap_segtab * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stp == NULL))
		return NULL;
	pt_entry_t *ptep = stp->seg_tab[va >> SEGSHIFT];
	if (__predict_false(ptep == NULL))
		return NULL;
	return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);
}

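/*
 * Common page-fault handler for the DSI/ISI and TLB-error exceptions.
 * Calls uvm_fault() on the faulting map; for kernel faults, pcb_onfault
 * is saved and cleared around the call and restored afterwards.
 * EACCES is folded into EFAULT for the callers.
 */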
static int
pagefault(struct vm_map *map, vaddr_t va, vm_prot_t ftype, bool usertrap)
{
	struct lwp * const l = curlwp;
	int rv;

//	printf("%s(%p,%#lx,%u,%u)\n", __func__, map, va, ftype, usertrap);

	if (usertrap) {
#ifdef KERN_SA
		if (l->l_flag & LW_SA) {
			l->l_savp->savp_faultaddr = va;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
#endif
		rv = uvm_fault(map, trunc_page(va), ftype);
		if (rv == 0)
			uvm_grow(l->l_proc, trunc_page(va));
		if (rv == EACCES)
			rv = EFAULT;
#ifdef KERN_SA
		l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
	} else {
		if (cpu_intr_p())
			return EFAULT;

		struct pcb * const pcb = lwp_getpcb(l);
		struct faultbuf * const fb = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (map != kernel_map) {
			if (rv == 0)
				uvm_grow(l->l_proc, trunc_page(va));
#ifdef KERN_SA
			l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
		}
		if (rv == EACCES)
			rv = EFAULT;
	}
	return rv;
}

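/*
 * Data storage interrupt (a protection fault on a present mapping).  A
 * write to a clean, writable page is handled here by marking the page
 * modified, clearing PTE_UNMODIFIED and re-entering the TLB entry;
 * everything else goes through pagefault(), with a SIGSEGV prepared for
 * fatal user-mode faults.
 */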
static int
dsi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_udsi.ev_count++;
	else
		ci->ci_ev_kdsi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this
	 * exception), we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_DS);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((ftype & VM_PROT_WRITE)
	    && ((pte & (PTE_xW|PTE_UNMODIFIED)) == (PTE_xW|PTE_UNMODIFIED))) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);

		if (!VM_PAGE_MD_MODIFIED_P(pg)) {
			pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED);
		}
		pte &= ~PTE_UNMODIFIED;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, ftype, usertrap);

	/*
	 * We can't get a MAPERR here since that's a different exception.
	 */
	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

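/*
 * Instruction storage interrupt.  The common case is a first execution
 * from a page whose PTE still has PTE_UNSYNCED set: write back the data
 * cache, invalidate the instruction cache, grant execute permission and
 * update the TLB.  Anything else is a real fault and goes to pagefault().
 */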
static int
isi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = trunc_page(tf->tf_srr0);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_isi.ev_count++;
	else
		ci->ci_ev_kisi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this
	 * exception), we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_IS);
	if (ptep == NULL)
		dump_trapframe(tf);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	if ((pte & PTE_UNSYNCED) == PTE_UNSYNCED) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);

		UVMHIST_LOG(pmapexechist,
		    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): %s",
		    tf->tf_srr0, pg, pa,
		    (VM_PAGE_MD_EXECPAGE_P(pg)
			? "no syncicache (already execpage)"
			: "performed syncicache (now execpage)"));

		if (!VM_PAGE_MD_EXECPAGE_P(pg)) {
			ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++;
			dcache_wb_page(pa);
			icache_inv_page(pa);
			pmap_page_set_attributes(pg, VM_PAGE_MD_EXECPAGE);
		}
		pte &= ~PTE_UNSYNCED;
		pte |= PTE_xX;
		*ptep = pte;

		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		UVMHIST_LOG(pmapexechist, "<- 0", 0,0,0,0);
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		/*
		 * We can't get a MAPERR here since
		 * that's a different exception.
		 */
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)tf->tf_srr0;	/* not truncated */
	}
	UVMHIST_LOG(pmapexechist, "<- %d", rv, 0,0,0);
	return rv;
}

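/*
 * Data TLB error: there was no valid translation for the data access, so
 * take a normal page fault on the faulting address.  A failed fault from
 * user mode is turned into a SIGSEGV.
 */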
static int
dtlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

#if 0
	/*
	 * This is what pte_load in trap_subr.S does for us.
	 */
	const pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	if (ptep != NULL && !usertrap && pte_valid_p(*ptep)) {
		tlb_update_addr(trunc_page(faultva), KERNEL_PID, *ptep, true);
		ci->ci_ev_tlbmiss_soft.ev_count++;
		return 0;
	}
#endif

	ci->ci_ev_dtlbmiss_hard.ev_count++;

//	printf("pagefault(%p,%#lx,%u,%u)", faultmap, faultva, ftype, usertrap);
	int rv = pagefault(faultmap, faultva, ftype, usertrap);
//	printf(": %d\n", rv);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

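/*
 * Instruction TLB error: no valid translation for the instruction fetch
 * at srr0.  Fault the page in with read/execute protection; a failed
 * fault from user mode is turned into a SIGSEGV.
 */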
static int
itlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const vaddr_t faultva = tf->tf_srr0;
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

	ci->ci_ev_itlbmiss_hard.ev_count++;

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

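/*
 * SPE unavailable exception.  For a user thread, load the vector unit
 * state (the SPE shares the AltiVec interface); without PPC_HAVE_SPE the
 * instruction is treated as illegal and a SIGILL is prepared instead.
 */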
static int
spe_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();

	if (!usertrap_p(tf))
		return EPERM;

	ci->ci_ev_vec.ev_count++;

#ifdef PPC_HAVE_SPE
	vec_load();
	return 0;
#else
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	ksi->ksi_code = ILL_ILLOPC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return EPERM;
#endif
}

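/*
 * Minimal emulation of instructions this core does not implement:
 * lwsync is treated as a no-op and mfspr of the PVR is satisfied from
 * the real register.  Returns true if the instruction was handled.
 */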
static bool
emulate_opcode(struct trapframe *tf, ksiginfo_t *ksi)
{
	uint32_t opcode;
	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return false;

	if (opcode == OPC_LWSYNC)
		return true;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return true;
	}

	/*
	 * If we bothered to emulate FP, we would try to do so here.
	 */
	return false;
}

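/*
 * Program exception (illegal/privileged instruction or trap instruction).
 * Kernel-mode faults are left for trap() to deal with.  For user mode,
 * restartable atomic sequences and the few emulated opcodes are handled
 * here; otherwise a SIGILL or SIGTRAP is prepared from the ESR bits.
 */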
static int
pgm_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	UVMHIST_LOG(pmapexechist, " srr0/1=%#x/%#x esr=%#x pte=%#x",
	    tf->tf_srr0, tf->tf_srr1, tf->tf_esr,
	    *trap_pte_lookup(tf, trunc_page(tf->tf_srr0), PSL_IS));

	ci->ci_ev_pgm.ev_count++;

	if (tf->tf_esr & ESR_PTR) {
		struct proc *p = curlwp->l_proc;
		if (p->p_raslist != NULL
		    && ras_lookup(p, (void *)tf->tf_srr0) != (void *) -1) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	if (tf->tf_esr & (ESR_PIL|ESR_PPR)) {
		if (emulate_opcode(tf, ksi)) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	if (tf->tf_esr & ESR_PIL) {
		ksi->ksi_code = ILL_ILLOPC;
	} else if (tf->tf_esr & ESR_PPR) {
		ksi->ksi_code = ILL_PRVOPC;
	} else if (tf->tf_esr & ESR_PTR) {
		ksi->ksi_signo = SIGTRAP;
		ksi->ksi_code = TRAP_BRKPT;
	} else {
		ksi->ksi_code = 0;
	}
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return rv;
}

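/*
 * Debug exception (instruction address compare).  Acknowledge it in the
 * DBSR, disable further debug events and prepare a SIGTRAP/TRAP_TRACE for
 * the user process.  Kernel-mode debug events are not handled here (with
 * DDB, trap() hands them to ddb_exception() first).
 */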
static int
debug_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	ci->ci_ev_debug.ev_count++;

	/*
	 * Ack the interrupt.
	 */
	mtspr(SPR_DBSR, tf->tf_esr);
	KASSERT(tf->tf_esr & (DBSR_IAC1|DBSR_IAC2));
	KASSERT((tf->tf_srr1 & PSL_SE) == 0);

	/*
	 * Disable debug events
	 */
	mtspr(SPR_DBCR1, 0);
	mtspr(SPR_DBCR0, 0);

	/*
	 * Tell the debugger ...
	 */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_trap = EXC_TRC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	ksi->ksi_code = TRAP_TRACE;
	return rv;
}

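/*
 * Alignment exception.  No emulation or fixup is attempted; a user-mode
 * fault has a SIGILL prepared from the ESR bits, and a kernel-mode fault
 * is left for trap() to resolve through pcb_onfault or to panic on.
 */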
static int
ali_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_ali.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		ci->ci_ev_ali_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		if (tf->tf_esr & ESR_PIL)
			ksi->ksi_code = ILL_ILLOPC;
		else if (tf->tf_esr & ESR_PPR)
			ksi->ksi_code = ILL_PRVOPC;
		else if (tf->tf_esr & ESR_PTR)
			ksi->ksi_code = ILL_ILLTRP;
		else
			ksi->ksi_code = 0;
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

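/*
 * Embedded floating-point data exception.  With PPC_HAVE_SPE this becomes
 * a SIGFPE whose code is derived from the vector unit state; otherwise
 * the instruction is simply illegal and a SIGILL is prepared.
 */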
static int
embedded_fp_data_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

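/*
 * Embedded floating-point round exception: handled the same way as the
 * data exception above, but reported with EDOM rather than EFAULT.
 */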
static int
embedded_fp_round_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EDOM;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

static void
dump_trapframe(const struct trapframe *tf)
{
	printf("trapframe %p (exc=%x srr0/1=%#lx/%#lx esr/dear=%#x/%#lx)\n",
	    tf, tf->tf_exc, tf->tf_srr0, tf->tf_srr1, tf->tf_esr, tf->tf_dear);
	printf("lr =%08lx ctr=%08lx cr =%08x xer=%08x\n",
	    tf->tf_lr, tf->tf_ctr, tf->tf_cr, tf->tf_xer);
	for (u_int r = 0; r < 32; r += 4) {
		printf("r%02u=%08lx r%02u=%08lx r%02u=%08lx r%02u=%08lx\n",
		    r+0, tf->tf_fixreg[r+0], r+1, tf->tf_fixreg[r+1],
		    r+2, tf->tf_fixreg[r+2], r+3, tf->tf_fixreg[r+3]);
	}
}
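
/*
 * Hand a kernel-mode breakpoint or debug exception to DDB.  If DDB
 * handles it, advance past the trapping instruction and resume.
 */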
static bool
ddb_exception(struct trapframe *tf)
{
#if 0
	const register_t ddb_trapfunc = (uintptr_t) cpu_Debugger;
	if ((tf->tf_esr & ESR_PTR) == 0)
		return false;
	if (ddb_trapfunc <= tf->tf_srr0 && tf->tf_srr0 <= ddb_trapfunc+16) {
		register_t srr0 = tf->tf_srr0;
		if (kdb_trap(tf->tf_exc, tf)) {
			if (srr0 == tf->tf_srr0)
				tf->tf_srr0 += 4;
			return true;
		}
	}
	return false;
#else
#if 0
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	printf("CPL stack:");
	if (ci->ci_idepth >= 0) {
		for (u_int i = 0; i <= ci->ci_idepth; i++) {
			printf(" [%u]=%u", i, cpu->cpu_pcpls[i]);
		}
	}
	printf(" %u\n", ci->ci_cpl);
	dump_trapframe(tf);
#endif
	if (kdb_trap(tf->tf_exc, tf)) {
		tf->tf_srr0 += 4;
		return true;
	}
	return false;
#endif
}

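/*
 * If the current LWP registered a fault handler via pcb_onfault, rewrite
 * the trapframe so the trap returns into that handler with the error code
 * in r3, and report that the fault was caught.
 */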
static bool
onfaulted(struct trapframe *tf, register_t rv)
{
	struct lwp * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	struct faultbuf * const fb = pcb->pcb_onfault;
	if (fb == NULL)
		return false;
	tf->tf_srr0 = fb->fb_pc;
	tf->tf_srr1 = fb->fb_msr;
	tf->tf_cr = fb->fb_cr;
	tf->tf_fixreg[1] = fb->fb_sp;
	tf->tf_fixreg[2] = fb->fb_r2;
	tf->tf_fixreg[3] = rv;
	pcb->pcb_onfault = NULL;
	return true;
}

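/*
 * Common trap entry point.  Dispatch on the exception type to the handlers
 * above; a non-zero return in kernel mode is resolved through onfaulted()
 * or ends in a panic, while in user mode it delivers the prepared signal
 * before returning via userret().
 */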
void
trap(enum ppc_booke_exceptions trap_code, struct trapframe *tf)
{
	const bool usertrap = usertrap_p(tf);
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	ksiginfo_t ksi;
	int rv = EACCES;

	ci->ci_ev_traps.ev_count++;
	ci->ci_data.cpu_ntrap++;

	KASSERTMSG(!usertrap || tf == trapframe(l),
	    ("trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, trapframe(l)));

#if 0
	if (trap_code != T_PROGRAM || usertrap)
		printf("trap(enter): %s (tf=%p, esr/dear=%#x/%#lx, srr0/1=%#lx/%#lx, lr=%#lx)\n",
		    trap_names[trap_code], tf, tf->tf_esr, tf->tf_dear,
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
#endif
#if 0
	if ((register_t)tf >= (register_t)l->l_addr + USPACE
	    || (register_t)tf < (register_t)l->l_addr + PAGE_SIZE) {
		printf("%s(entry): pid %d.%d (%s): invalid tf addr %p\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm, tf);
		dump_trapframe(tf);
		Debugger();
	}
#endif
#if 0
	if ((mfmsr() & PSL_CE) == 0) {
		printf("%s(entry): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], mfmsr());
		dump_trapframe(tf);
	}
#endif

	if (usertrap && (tf->tf_fixreg[1] & 0x80000000)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid sp %#lx (sprg1=%#lx)\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_fixreg[1], mfspr(SPR_SPRG1));
		dump_trapframe(tf);
		Debugger();
	}

	if (usertrap && (tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_srr1);
		dump_trapframe(tf);
		Debugger();
	}

	switch (trap_code) {
	case T_CRITIAL_INPUT:
	case T_EXTERNAL_INPUT:
	case T_DECREMENTER:
	case T_FIXED_INTERVAL:
	case T_WATCHDOG:
	case T_SYSTEM_CALL:
	default:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_MACHINE_CHECK:
		rv = mchk_exception(tf, &ksi);
		break;
	case T_DSI:
		rv = dsi_exception(tf, &ksi);
		break;
	case T_ISI:
		rv = isi_exception(tf, &ksi);
		break;
	case T_ALIGNMENT:
		rv = ali_exception(tf, &ksi);
		break;
	case T_SPE_UNAVAILABLE:
		rv = spe_exception(tf, &ksi);
		break;
	case T_PROGRAM:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = pgm_exception(tf, &ksi);
		break;
	case T_FP_UNAVAILABLE:
	case T_AP_UNAVAILABLE:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_DATA_TLB_ERROR:
		rv = dtlb_exception(tf, &ksi);
		break;
	case T_INSTRUCTION_TLB_ERROR:
		rv = itlb_exception(tf, &ksi);
		break;
	case T_DEBUG:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = debug_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_DATA:
		rv = embedded_fp_data_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_ROUND:
		rv = embedded_fp_round_exception(tf, &ksi);
		break;
	case T_EMBEDDED_PERF_MONITOR:
		//db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
		dump_trapframe(tf);
		rv = EPERM;
		break;
	case T_AST:
		KASSERT(usertrap);
		l->l_md.md_astpending = 0;	/* we are about to do it */
		ci->ci_data.cpu_nsoft++;
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		/* Check whether we are being preempted. */
		if (ci->ci_want_resched)
			preempt();
		if (tf->tf_fixreg[1] & 0x80000000) {
			printf("%s(ast-exit): pid %d.%d (%s): invalid sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
		return;
	}
	if (!usertrap) {
		if (rv != 0) {
			if (!onfaulted(tf, rv)) {
				db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
				dump_trapframe(tf);
				panic("%s: pid %d.%d (%s): %s exception in kernel mode"
				    " (tf=%p, dear=%#lx, esr=%#x,"
				    " srr0/1=%#lx/%#lx)",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code], tf, tf->tf_dear,
				    tf->tf_esr, tf->tf_srr0, tf->tf_srr1);
			}
		}
#if 0
		if (tf->tf_fixreg[1] >= (register_t)l->l_addr + USPACE
		    || tf->tf_fixreg[1] < (register_t)l->l_addr + PAGE_SIZE) {
			printf("%s(exit): pid %d.%d (%s): invalid kern sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			mtmsr(mfmsr()|PSL_CE);
			dump_trapframe(tf);
		}
#endif
	} else {
		if (rv == ENOMEM) {
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		if (rv != 0) {
			if (cpu_printfataltraps) {
				printf("%s: pid %d.%d (%s):"
				    " %s exception in user mode\n",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code]);
				if (cpu_printfataltraps > 1)
					dump_trapframe(tf);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
#ifdef DEBUG
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(exit): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
	}
}

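/*
 * Return to user mode after an upcall; only the MI userret path is
 * needed here.
 */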
void
upcallret(struct lwp *l)
{

	mi_userret(l);		/* Invoke MI userret code */
}

/*
 * Start a new LWP
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	struct lwp * const l = curlwp;

	int error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);
	(void)error;
	kmem_free(uc, sizeof(ucontext_t));
	upcallret(l);
}