/*	$NetBSD: trap.c,v 1.2 2011/01/18 01:02:52 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_sa.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.2 2011/01/18 01:02:52 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/siginfo.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#ifdef KERN_SA
#include <sys/savar.h>
#endif
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ras.h>

#include <uvm/uvm_extern.h>

#include <powerpc/pcb.h>
#include <powerpc/userret.h>
#include <powerpc/psl.h>
#include <powerpc/instr.h>
#include <powerpc/altivec.h>	/* use same interface for SPE */

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>

#include <powerpc/db_machdep.h>
#include <ddb/db_interface.h>

#include <powerpc/trap.h>
#include <powerpc/booke/trap.h>
#include <powerpc/booke/pte.h>

void trap(enum ppc_booke_exceptions, struct trapframe *);
static void dump_trapframe(const struct trapframe *);

static const char trap_names[][8] = {
	[T_CRITIAL_INPUT] = "CRIT",
	[T_EXTERNAL_INPUT] = "EXT",
	[T_DECREMENTER] = "DECR",
	[T_FIXED_INTERVAL] = "FIT",
	[T_WATCHDOG] = "WDOG",
	[T_SYSTEM_CALL] = "SC",
	[T_MACHINE_CHECK] = "MCHK",
	[T_DSI] = "DSI",
	[T_ISI] = "ISI",
	[T_ALIGNMENT] = "ALN",
	[T_PROGRAM] = "PGM",
	[T_FP_UNAVAILABLE] = "FP",
	[T_AP_UNAVAILABLE] = "AP",
	[T_DATA_TLB_ERROR] = "DTLB",
	[T_INSTRUCTION_TLB_ERROR] = "ITLB",
	[T_DEBUG] = "DEBUG",
	[T_SPE_UNAVAILABLE] = "SPE",
	[T_EMBEDDED_FP_DATA] = "FPDATA",
	[T_EMBEDDED_FP_ROUND] = "FPROUND",
	[T_EMBEDDED_PERF_MONITOR] = "PERFMON",
	[T_AST] = "AST",
};

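/*
 * True iff the exception was taken from user mode (PSL_PR was set in the
 * saved MSR).
 */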
static inline bool
usertrap_p(struct trapframe *tf)
{
	return (tf->tf_srr1 & PSL_PR) != 0;
}

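/*
 * Machine check exception.  Nothing is recovered here: the event is
 * counted and, for user mode, reported as SIGSEGV at the address in MCAR.
 * Kernel-mode machine checks are left to the caller, which will use
 * onfault recovery or panic.
 */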
static int
mchk_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const bool usertrap = usertrap_p(tf);
	const vaddr_t faultva = tf->tf_mcar;
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	if (usertrap)
		ci->ci_ev_umchk.ev_count++;

	if (rv != 0 && usertrap) {
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}

	return rv;
}

static inline vm_prot_t
get_faulttype(const struct trapframe * const tf)
{
	return VM_PROT_READ | (tf->tf_esr & ESR_ST ? VM_PROT_WRITE : 0);
}

static inline struct vm_map *
get_faultmap(const struct trapframe * const tf, register_t psl_mask)
{
	return (tf->tf_srr1 & psl_mask)
	    ? &curlwp->l_proc->p_vmspace->vm_map
	    : kernel_map;
}

/*
 * We could use pmap_pte_lookup() but this is slightly faster since we
 * already have the segtab pointers in cpu_info.
 */
static inline pt_entry_t *
trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
{
	struct pmap_segtab ** const stps = &curcpu()->ci_pmap_kern_segtab;
	struct pmap_segtab * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stp == NULL))
		return NULL;
	pt_entry_t *ptep = stp->seg_tab[va >> SEGSHIFT];
	if (__predict_false(ptep == NULL))
		return NULL;
	return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);
}

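/*
 * Common page fault handling for the faulting exceptions.  User faults go
 * straight to uvm_fault() (growing the stack on success); kernel faults
 * are refused from interrupt context and call uvm_fault() with
 * pcb_onfault saved and cleared for the duration.
 */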
static int
pagefault(struct vm_map *map, vaddr_t va, vm_prot_t ftype, bool usertrap)
{
	struct lwp * const l = curlwp;
	int rv;

//	printf("%s(%p,%#lx,%u,%u)\n", __func__, map, va, ftype, usertrap);

	if (usertrap) {
#ifdef KERN_SA
		if (l->l_flag & LW_SA) {
			l->l_savp->savp_faultaddr = va;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
#endif
		rv = uvm_fault(map, trunc_page(va), ftype);
		if (rv == 0)
			uvm_grow(l->l_proc, trunc_page(va));
#ifdef KERN_SA
		l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
	} else {
		if (cpu_intr_p())
			return EFAULT;

		struct pcb * const pcb = lwp_getpcb(l);
		struct faultbuf * const fb = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (map != kernel_map) {
			if (rv == 0)
				uvm_grow(l->l_proc, trunc_page(va));
#ifdef KERN_SA
			l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
		}
		if (rv == EACCES)
			rv = EFAULT;
	}
	return rv;
}

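/*
 * Data storage interrupt (DSI).  A write fault on a page whose PTE is
 * merely tracking modification (PTE_UNMODIFIED) is fixed up in place and
 * the TLB entry updated; everything else goes through pagefault().
 */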
static int
dsi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_udsi.ev_count++;
	else
		ci->ci_ev_kdsi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this
	 * exception), we certainly have a PTE.
	 */
	pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((ftype & VM_PROT_WRITE)
	    && ((pte & (PTE_xW|PTE_UNMODIFIED)) == (PTE_xW|PTE_UNMODIFIED))) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);

		if (!VM_PAGE_MD_MODIFIED_P(pg)) {
			pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED);
		}
		pte &= ~PTE_UNMODIFIED;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, ftype, usertrap);

	/*
	 * We can't get a MAPERR here since that's a different exception.
	 */
	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

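/*
 * Instruction storage interrupt (ISI).  An execute fault on a page marked
 * PTE_UNSYNCED means the instruction cache may be stale: write back the
 * dcache, invalidate the icache, grant execute permission and update the
 * TLB.  Anything else goes through pagefault().
 */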
static int
isi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = trunc_page(tf->tf_srr0);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_isi.ev_count++;
	else
		ci->ci_ev_kisi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this
	 * exception), we certainly have a PTE.
	 */
	pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_IS);
	if (ptep == NULL)
		dump_trapframe(tf);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((pte & PTE_UNSYNCED) == PTE_UNSYNCED) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);

		if (!VM_PAGE_MD_EXECPAGE_P(pg)) {
			ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++;
			dcache_wb_page(pa);
			icache_inv_page(pa);
			pmap_page_set_attributes(pg, VM_PAGE_MD_EXECPAGE);
		}
		pte &= ~PTE_UNSYNCED;
		pte |= PTE_xX;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		/*
		 * We can't get a MAPERR here since
		 * that's a different exception.
		 */
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)tf->tf_srr0;	/* not truncated */
	}
	return rv;
}

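/*
 * Data TLB error: a data access missed in the TLB and the assembly fast
 * path (pte_load in trap_subr.S) could not load a valid PTE, so resolve
 * it with a real page fault.
 */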
static int
dtlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

#if 0
	/*
	 * This is what pte_load in trap_subr.S does for us.
	 */
	const pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	if (ptep != NULL && !usertrap && pte_valid_p(*ptep)) {
		tlb_update_addr(trunc_page(faultva), KERNEL_PID, *ptep, true);
		ci->ci_ev_tlbmiss_soft.ev_count++;
		return 0;
	}
#endif

	ci->ci_ev_dtlbmiss_hard.ev_count++;

//	printf("pagefault(%p,%#lx,%u,%u)", faultmap, faultva, ftype, usertrap);
	int rv = pagefault(faultmap, faultva, ftype, usertrap);
//	printf(": %d\n", rv);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

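/*
 * Instruction TLB error: an instruction fetch missed in the TLB; resolve
 * it with a page fault for read/execute access.
 */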
static int
itlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const vaddr_t faultva = tf->tf_srr0;
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

	ci->ci_ev_itlbmiss_hard.ev_count++;

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

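/*
 * SPE unavailable exception.  If the kernel was built with SPE support,
 * simply enable the unit (SPE reuses the AltiVec interface); otherwise
 * the instruction is reported as illegal.
 */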
static int
spe_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();

	if (!usertrap_p(tf))
		return EPERM;

	ci->ci_ev_vec.ev_count++;

#ifdef PPC_HAVE_SPE
	vec_enable();
	return 0;
#else
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	ksi->ksi_code = ILL_ILLOPC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return EPERM;
#endif
}

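/*
 * Emulate the few opcodes that user programs use but this core traps on:
 * lwsync is skipped and mfspr of the PVR is satisfied from the real
 * register.  Returns true if the opcode was handled.
 */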
static bool
emulate_opcode(struct trapframe *tf, ksiginfo_t *ksi)
{
	uint32_t opcode;
	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return false;

	if (opcode == OPC_LWSYNC)
		return true;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return true;
	}

	/*
	 * If we bothered to emulate FP, we would try to do so here.
	 */
	return false;
}

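/*
 * Program exception.  A trap instruction that lands inside a registered
 * restartable atomic sequence is stepped over, illegal/privileged opcodes
 * get a shot at emulate_opcode(), and anything left over becomes SIGILL.
 */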
static int
pgm_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	ci->ci_ev_pgm.ev_count++;

	if (tf->tf_esr & ESR_PTR) {
		struct proc *p = curlwp->l_proc;
		if (p->p_raslist != NULL
		    && ras_lookup(p, (void *)tf->tf_srr0) != (void *) -1) {
			tf->tf_srr0 += 4;
			return 0;
		}
	} else if (tf->tf_esr & (ESR_PIL|ESR_PPR)) {
		if (emulate_opcode(tf, ksi)) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	if (tf->tf_esr & ESR_PIL)
		ksi->ksi_code = ILL_ILLOPC;
	else if (tf->tf_esr & ESR_PPR)
		ksi->ksi_code = ILL_PRVOPC;
	else if (tf->tf_esr & ESR_PTR)
		ksi->ksi_code = ILL_ILLTRP;
	else
		ksi->ksi_code = 0;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return rv;
}

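/*
 * Alignment exception.  No fixup is attempted; user faults are counted
 * and turned into a signal, kernel faults are left to the onfault/panic
 * handling in trap().
 */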
static int
ali_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_ali.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		ci->ci_ev_ali_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		if (tf->tf_esr & ESR_PIL)
			ksi->ksi_code = ILL_ILLOPC;
		else if (tf->tf_esr & ESR_PPR)
			ksi->ksi_code = ILL_PRVOPC;
		else if (tf->tf_esr & ESR_PTR)
			ksi->ksi_code = ILL_ILLTRP;
		else
			ksi->ksi_code = 0;
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

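/*
 * Embedded floating-point data exception: delivered as SIGFPE when SPE
 * support is compiled in, otherwise as SIGILL.
 */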
static int
embedded_fp_data_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

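/*
 * Embedded floating-point round exception: handled the same way as the
 * data exception above.
 */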
static int
embedded_fp_round_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EDOM;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

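/*
 * Print the saved exception state and general purpose registers to the
 * console.
 */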
static void
dump_trapframe(const struct trapframe *tf)
{
	printf("trapframe %p (exc=%x srr0/1=%#lx/%#lx esr/dear=%#x/%#lx)\n",
	    tf, tf->tf_exc, tf->tf_srr0, tf->tf_srr1, tf->tf_esr, tf->tf_dear);
	printf("lr =%08lx ctr=%08lx cr =%08x xer=%08x\n",
	    tf->tf_lr, tf->tf_ctr, tf->tf_cr, tf->tf_xer);
	for (u_int r = 0; r < 32; r += 4) {
		printf("r%02u=%08lx r%02u=%08lx r%02u=%08lx r%02u=%08lx\n",
		    r+0, tf->tf_fixreg[r+0], r+1, tf->tf_fixreg[r+1],
		    r+2, tf->tf_fixreg[r+2], r+3, tf->tf_fixreg[r+3]);
	}
}
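
/*
 * Hand a kernel trap to DDB.  The live variant dumps the current CPL
 * stack and trapframe, then calls kdb_trap(), advancing past the trapping
 * instruction when the debugger resumes execution.
 */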
static bool
ddb_exception(struct trapframe *tf)
{
#if 0
	const register_t ddb_trapfunc = (uintptr_t) cpu_Debugger;
	if ((tf->tf_esr & ESR_PTR) == 0)
		return false;
	if (ddb_trapfunc <= tf->tf_srr0 && tf->tf_srr0 <= ddb_trapfunc+16) {
		register_t srr0 = tf->tf_srr0;
		if (kdb_trap(tf->tf_exc, tf)) {
			if (srr0 == tf->tf_srr0)
				tf->tf_srr0 += 4;
			return true;
		}
	}
	return false;
#else
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	printf("CPL stack:");
	if (ci->ci_idepth >= 0) {
		for (u_int i = 0; i <= ci->ci_idepth; i++) {
			printf(" [%u]=%u", i, cpu->cpu_pcpls[i]);
		}
	}
	printf(" %u\n", ci->ci_cpl);
	dump_trapframe(tf);
	if (kdb_trap(tf->tf_exc, tf)) {
		tf->tf_srr0 += 4;
		return true;
	}
	return false;
#endif
}

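/*
 * If the current LWP has a fault handler registered (pcb_onfault, as set
 * up by copyin/copyout and friends), redirect execution to it: restore the
 * saved PC, MSR, CR, SP and r2, and hand the error code back in r3.
 */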
static bool
onfaulted(struct trapframe *tf, register_t rv)
{
	struct lwp * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	struct faultbuf * const fb = pcb->pcb_onfault;
	if (fb == NULL)
		return false;
	tf->tf_srr0 = fb->fb_pc;
	tf->tf_srr1 = fb->fb_msr;
	tf->tf_cr = fb->fb_cr;
	tf->tf_fixreg[1] = fb->fb_sp;
	tf->tf_fixreg[2] = fb->fb_r2;
	tf->tf_fixreg[3] = rv;
	pcb->pcb_onfault = NULL;
	return true;
}

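/*
 * Common C entry point for BookE exceptions that are not handled entirely
 * in assembly.  Dispatch on the trap code; on failure, user traps are
 * turned into signals while kernel traps either resume via onfaulted()
 * or panic.
 */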
void
trap(enum ppc_booke_exceptions trap_code, struct trapframe *tf)
{
	const bool usertrap = usertrap_p(tf);
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	ksiginfo_t ksi;
	int rv = EACCES;

	ci->ci_ev_traps.ev_count++;
	ci->ci_data.cpu_ntrap++;

	KASSERTMSG(!usertrap || tf == trapframe(l),
	    ("trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, trapframe(l)));

#if 0
	if (trap_code != T_PROGRAM || usertrap)
		printf("trap(enter): %s (tf=%p, esr/dear=%#x/%#lx, srr0/1=%#lx/%#lx, lr=%#lx)\n",
		    trap_names[trap_code], tf, tf->tf_esr, tf->tf_dear,
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
#endif
#if 0
	if ((register_t)tf >= (register_t)l->l_addr + USPACE
	    || (register_t)tf < (register_t)l->l_addr + PAGE_SIZE) {
		printf("%s(entry): pid %d.%d (%s): invalid tf addr %p\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm, tf);
		dump_trapframe(tf);
		Debugger();
	}
#endif
#if 0
	if ((mfmsr() & PSL_CE) == 0) {
		printf("%s(entry): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], mfmsr());
		dump_trapframe(tf);
	}
#endif

	if (usertrap && (tf->tf_fixreg[1] & 0x80000000)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid sp %#lx (sprg1=%#lx)\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_fixreg[1], mfspr(SPR_SPRG1));
		dump_trapframe(tf);
		Debugger();
	}

	if (usertrap && (tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_srr1);
		dump_trapframe(tf);
		Debugger();
	}

	switch (trap_code) {
	case T_CRITIAL_INPUT:
	case T_EXTERNAL_INPUT:
	case T_DECREMENTER:
	case T_FIXED_INTERVAL:
	case T_WATCHDOG:
	case T_SYSTEM_CALL:
	default:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_MACHINE_CHECK:
		rv = mchk_exception(tf, &ksi);
		break;
	case T_DSI:
		rv = dsi_exception(tf, &ksi);
		break;
	case T_ISI:
		rv = isi_exception(tf, &ksi);
		break;
	case T_ALIGNMENT:
		rv = ali_exception(tf, &ksi);
		break;
	case T_SPE_UNAVAILABLE:
		rv = spe_exception(tf, &ksi);
		break;
	case T_PROGRAM:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = pgm_exception(tf, &ksi);
		break;
	case T_FP_UNAVAILABLE:
	case T_AP_UNAVAILABLE:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_DATA_TLB_ERROR:
		rv = dtlb_exception(tf, &ksi);
		break;
	case T_INSTRUCTION_TLB_ERROR:
		rv = itlb_exception(tf, &ksi);
		break;
	case T_DEBUG:
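		/* FALLTHROUGH */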
	case T_EMBEDDED_FP_DATA:
		rv = embedded_fp_data_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_ROUND:
		rv = embedded_fp_round_exception(tf, &ksi);
		break;
	case T_EMBEDDED_PERF_MONITOR:
		//db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
		dump_trapframe(tf);
		rv = EPERM;
		break;
	case T_AST:
		KASSERT(usertrap);
		ci->ci_astpending = 0;		/* we are about to do it */
		ci->ci_data.cpu_nsoft++;
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		/* Check whether we are being preempted. */
		if (ci->ci_want_resched)
			preempt();
		if (tf->tf_fixreg[1] & 0x80000000) {
			printf("%s(ast-exit): pid %d.%d (%s): invalid sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
		return;
	}
	if (!usertrap) {
		if (rv != 0) {
			if (!onfaulted(tf, rv)) {
				db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
				dump_trapframe(tf);
				panic("%s: pid %d.%d (%s): %s exception in kernel mode"
				    " (tf=%p, dear=%#lx, esr=%#x,"
				    " srr0/1=%#lx/%#lx)",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code], tf, tf->tf_dear,
				    tf->tf_esr, tf->tf_srr0, tf->tf_srr1);
			}
		}
#if 0
		if (tf->tf_fixreg[1] >= (register_t)l->l_addr + USPACE
		    || tf->tf_fixreg[1] < (register_t)l->l_addr + PAGE_SIZE) {
			printf("%s(exit): pid %d.%d (%s): invalid kern sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			mtmsr(mfmsr()|PSL_CE);
			dump_trapframe(tf);
		}
#endif
	} else {
		if (rv == ENOMEM) {
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		if (rv != 0) {
			if (cpu_printfataltraps) {
				printf("%s: pid %d.%d (%s):"
				    " %s exception in user mode\n",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code]);
				if (cpu_printfataltraps > 1)
					dump_trapframe(tf);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
#ifdef DEBUG
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(exit): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
	}
}

void
upcallret(struct lwp *l)
{

	mi_userret(l);		/* Invoke MI userret code */
}

/*
 * Start a new LWP
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	struct lwp * const l = curlwp;

	int error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);
	(void)error;
	kmem_free(uc, sizeof(ucontext_t));
	upcallret(l);
}