trap.c revision 1.18 1 /* $NetBSD: trap.c,v 1.18 2003/09/26 00:00:17 eeh Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.18 2003/09/26 00:00:17 eeh Exp $");
71
72 #include "opt_altivec.h"
73 #include "opt_ddb.h"
74 #include "opt_ktrace.h"
75 #include "opt_systrace.h"
76 #include "opt_syscall_debug.h"
77
78 #include <sys/param.h>
79 #include <sys/proc.h>
80 #include <sys/reboot.h>
81 #include <sys/syscall.h>
82 #include <sys/systm.h>
83 #include <sys/user.h>
84 #ifdef KTRACE
85 #include <sys/ktrace.h>
86 #endif
87 #include <sys/pool.h>
88 #include <sys/sa.h>
89 #include <sys/savar.h>
90 #ifdef SYSTRACE
91 #include <sys/systrace.h>
92 #endif
93
94 #include <uvm/uvm_extern.h>
95
96 #include <dev/cons.h>
97
98 #include <machine/cpu.h>
99 #include <machine/db_machdep.h>
100 #include <machine/fpu.h>
101 #include <machine/frame.h>
102 #include <machine/pcb.h>
103 #include <machine/psl.h>
104 #include <machine/trap.h>
105
106 #include <powerpc/spr.h>
107 #include <powerpc/ibm4xx/pmap.h>
108 #include <powerpc/ibm4xx/tlb.h>
109 #include <powerpc/fpu/fpu_extern.h>
110
111 /* These definitions should probably be somewhere else XXX */
112 #define FIRSTARG 3 /* first argument is in reg 3 */
113 #define NARGREG 8 /* 8 args are in registers */
114 #define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
115
116 static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));
117
118 void trap __P((struct trapframe *)); /* Called from locore / trap_subr */
119 /* Why are these not defined in a header? */
120 int badaddr __P((void *, size_t));
121 int badaddr_read __P((void *, size_t, int *));
122 int ctx_setup __P((int, int));
123
124 #ifdef DEBUG
125 #define TDB_ALL 0x1
126 int trapdebug = /* TDB_ALL */ 0;
127 #define DBPRINTF(x, y) if (trapdebug & (x)) printf y
128 #else
129 #define DBPRINTF(x, y)
130 #endif
131
/*
 * trap:
 *
 *	Machine-dependent trap handler, called from locore / trap_subr
 *	with the trap frame built by the low-level entry code.  Dispatches
 *	on the exception type (EXC_USER is or'ed in when the trap came
 *	from user mode) and either resolves the fault, posts a signal to
 *	the faulting LWP, or panics.  On the way back to user mode it
 *	performs the usual userret-style work (signals, userret hook,
 *	SA upcalls, priority update).
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	int type = frame->exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l == 0 || (l->l_stat == LSONPROC));

	/* PSL_PR set in the saved MSR means we trapped from user mode. */
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	/* Default fault type; upgraded to WRITE/EXECUTE in the cases below. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;

			/*
			 * Read SPR 0x3f0 (a 4xx debug SPR -- printed as
			 * "debug reg" below; presumably DBSR, confirm
			 * against the CPU manual).  NOTE(review): the asm
			 * declares three outputs but the template only
			 * writes %0, so srr2/srr3 are printed
			 * uninitialized here.
			 */
			__asm __volatile("mfspr %0,0x3f0" :
			    "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
			    srr3);
			/* XXX fall through or break here?! */
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
	case EXC_TRC|EXC_USER:
		/* Single-step trace: clear PSL_SE and post SIGTRAP. */
		frame->srr1 &= ~PSL_SE;
		memset(&ksi, 0, sizeof(ksi));
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)frame->srr0;
		KERNEL_PROC_LOCK(l);
		trapsignal(l, &ksi);
		KERNEL_PROC_UNLOCK(l);
		break;

	/*
	 * If we could not find and install appropriate TLB entry, fall through.
	 */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		/* Kernel-mode data access fault / data TLB miss. */
		{
			struct vm_map *map;
			vaddr_t va;
			struct faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dar;
			/*
			 * A fault taken with the kernel PID is against the
			 * kernel map; otherwise it is against the current
			 * process's map (e.g. a fault from copyin/copyout,
			 * which runs with the user PID loaded).
			 */
			if (frame->tf_xtra[TF_PID] == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
				if (l->l_flag & L_SA) {
					/* Scheduler activations bookkeeping. */
					KDASSERT(p != NULL && p->p_sa != NULL);
					p->p_sa->sa_vp_faultaddr = va;
					l->l_flag |= L_SA_PAGEFAULT;
				}
			}

			/* ESR store/zone-fault bits indicate a write fault. */
			if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->tf_xtra[TF_ESR]));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (map != kernel_map)
				l->l_flag &= ~L_SA_PAGEFAULT;
			if (rv == 0)
				goto done;
			/*
			 * Unresolved fault: if an onfault handler is
			 * registered (copyin/copyout and friends), unwind
			 * to it -- restore its saved pc/sp/regs and make
			 * the faulting call return TRUE -- instead of
			 * panicking.
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/* User-mode data access fault / data TLB miss. */
		KERNEL_PROC_LOCK(l);

		/* ESR store/zone-fault bits indicate a write fault. */
		if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dar, frame->tf_xtra[TF_ESR]));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
		if (l->l_flag & L_SA) {
			KDASSERT(p != NULL && p->p_sa != NULL);
			p->p_sa->sa_vp_faultaddr = (vaddr_t)frame->dar;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
		    0, ftype);
		if (rv == 0) {
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		/*
		 * Fault could not be resolved: deliver SIGSEGV, or SIGKILL
		 * if the failure was due to swap exhaustion (ENOMEM).
		 */
		memset(&ksi, 0, sizeof(ksi));
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)frame->dar;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			ksi.ksi_signo = SIGKILL;
		}
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/* User-mode instruction fetch fault / instruction TLB miss. */
		KERNEL_PROC_LOCK(l);
		if (l->l_flag & L_SA) {
			KDASSERT(p != NULL && p->p_sa != NULL);
			p->p_sa->sa_vp_faultaddr = (vaddr_t)frame->srr0;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    0, ftype);
		if (rv == 0) {
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		/* Unresolvable execute fault: SIGSEGV at the fetch address. */
		memset(&ksi, 0, sizeof(ksi));
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)frame->srr0;
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_AST|EXC_USER:
		/* Asynchronous system trap: profiling ticks and preemption. */
		curcpu()->ci_astpending = 0;	/* we are about to do it */
		KERNEL_PROC_LOCK(l);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (curcpu()->ci_want_resched)
			preempt(0);
		KERNEL_PROC_UNLOCK(l);
		break;


	case EXC_ALI|EXC_USER:
		/* Alignment fault: try to emulate the access, else SIGBUS. */
		KERNEL_PROC_LOCK(l);
		if (fix_unaligned(l, frame) != 0) {
			memset(&ksi, 0, sizeof(ksi));
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)frame->dar;
			trapsignal(l, &ksi);
		} else
			/* Emulated successfully: skip the faulting insn. */
			frame->srr0 += 4;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps ++;
		if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
			/* First FP use by this LWP: start with clean state. */
			memset(&l->l_addr->u_pcb.pcb_fpu, 0,
			    sizeof l->l_addr->u_pcb.pcb_fpu);
			l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		/*
		 * fpu_emulate() returns 0 on success or a signal number
		 * on failure, which we deliver verbatim.
		 */
		if ((rv = fpu_emulate(frame,
		    (struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
			memset(&ksi, 0, sizeof(ksi));
			ksi.ksi_signo = rv;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)frame->srr0;
			KERNEL_PROC_LOCK(l);
			trapsignal(l, &ksi);
			KERNEL_PROC_UNLOCK(l);
		}
		break;

	case EXC_MCHK:
		/*
		 * Kernel-mode machine check (e.g. a probe via badaddr()):
		 * recover through the registered onfault handler if any.
		 */
		{
			struct faultbuf *fb;

			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;
	default:
	brain_damage:
		/* Unhandled trap: drop into DDB if configured, else panic. */
		printf("trap type 0x%x at 0x%lx\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Take pending signals. */
	{
		int sig;

		while ((sig = CURSIG(l)) != 0)
			postsig(sig);
	}

	/* Invoke per-process kernel-exit handling, if any */
	if (p->p_userret)
		(p->p_userret)(l, p->p_userret_arg);

	/* Invoke any pending upcalls */
	while (l->l_flag & L_SA_UPCALL)
		sa_upcall_userret(l);

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
	return;
}
405
406 int
407 ctx_setup(int ctx, int srr1)
408 {
409 volatile struct pmap *pm;
410
411 /* Update PID if we're returning to user mode. */
412 if (srr1 & PSL_PR) {
413 pm = curproc->p_vmspace->vm_map.pmap;
414 if (!pm->pm_ctx) {
415 ctx_alloc((struct pmap *)pm);
416 }
417 ctx = pm->pm_ctx;
418 if (srr1 & PSL_SE) {
419 int dbreg, mask = 0x48000000;
420 /*
421 * Set the Internal Debug and
422 * Instruction Completion bits of
423 * the DBCR0 register.
424 *
425 * XXX this is also used by jtag debuggers...
426 */
427 __asm __volatile("mfspr %0,0x3f2;"
428 "or %0,%0,%1;"
429 "mtspr 0x3f2,%0;" :
430 "=&r" (dbreg) : "r" (mask));
431 }
432 }
433 else if (!ctx) {
434 ctx = KERNEL_PID;
435 }
436 return (ctx);
437 }
438
439 /*
440 * Used by copyin()/copyout()
441 */
442 extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
443 extern void vunmaprange __P((vaddr_t, vsize_t));
444 static int bigcopyin __P((const void *, void *, size_t ));
445 static int bigcopyout __P((const void *, void *, size_t ));
446
/*
 * copyin:
 *
 *	Copy len bytes from user space (udaddr) into kernel space
 *	(kaddr).  Returns 0 on success or EFAULT if a fault occurs;
 *	faults are caught via the pcb_onfault handler installed by
 *	setfault() and unwound by the EXC_DSI path in trap().
 *
 *	The copy is done byte-by-byte, switching the MMU PID register
 *	between the user context and the (saved) kernel PID around each
 *	load/store.  Buffers larger than 256 bytes take the
 *	map-and-memcpy path in bigcopyin() instead.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256) return (bigcopyin(udaddr, kaddr, len));

	if (setfault(&env)) {
		/* We took a fault during the copy: clear the handler. */
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %3; sync;"		/* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;"	/* Store kernel byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	/* Success: disarm the fault handler. */
	curpcb->pcb_onfault = 0;
	return 0;
}
492
493 static int
494 bigcopyin(const void *udaddr, void *kaddr, size_t len)
495 {
496 const char *up;
497 char *kp = kaddr;
498 struct lwp *l = curlwp;
499 struct proc *p;
500 int error;
501
502 if (!l) {
503 return EFAULT;
504 }
505
506 p = l->l_proc;
507
508 /*
509 * Stolen from physio():
510 */
511 PHOLD(l);
512 error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
513 if (error) {
514 PRELE(l);
515 return EFAULT;
516 }
517 up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
518
519 memcpy(kp, up, len);
520 vunmaprange((vaddr_t)up, len);
521 uvm_vsunlock(p, (caddr_t)udaddr, len);
522 PRELE(l);
523
524 return 0;
525 }
526
/*
 * copyout:
 *
 *	Copy len bytes from kernel space (kaddr) out to user space
 *	(udaddr).  Returns 0 on success or EFAULT if a fault occurs;
 *	faults are caught via the pcb_onfault handler installed by
 *	setfault() and unwound by the EXC_DSI path in trap().
 *
 *	Mirror image of copyin(): byte-by-byte copy with the MMU PID
 *	switched between kernel and user context around each access.
 *	Buffers larger than 256 bytes go through bigcopyout().
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256) return (bigcopyout(kaddr, udaddr, len));

	if (setfault(&env)) {
		/* We took a fault during the copy: clear the handler. */
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;"		/* Load user ctx */
		"stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;"	/* Store user byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	/* Success: disarm the fault handler. */
	curpcb->pcb_onfault = 0;
	return 0;
}
572
573 static int
574 bigcopyout(const void *kaddr, void *udaddr, size_t len)
575 {
576 char *up;
577 const char *kp = (char *)kaddr;
578 struct lwp *l = curlwp;
579 struct proc *p;
580 int error;
581
582 if (!l) {
583 return EFAULT;
584 }
585
586 p = l->l_proc;
587
588 /*
589 * Stolen from physio():
590 */
591 PHOLD(l);
592 error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
593 if (error) {
594 PRELE(l);
595 return EFAULT;
596 }
597 up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
598 VM_PROT_READ | VM_PROT_WRITE);
599
600 memcpy(up, kp, len);
601 vunmaprange((vaddr_t)up, len);
602 uvm_vsunlock(p, udaddr, len);
603 PRELE(l);
604
605 return 0;
606 }
607
608 /*
609 * kcopy(const void *src, void *dst, size_t len);
610 *
611 * Copy len bytes from src to dst, aborting if we encounter a fatal
612 * page fault.
613 *
614 * kcopy() _must_ save and restore the old fault handler since it is
615 * called by uiomove(), which may be in the path of servicing a non-fatal
616 * page fault.
617 */
618 int
619 kcopy(const void *src, void *dst, size_t len)
620 {
621 struct faultbuf env, *oldfault;
622
623 oldfault = curpcb->pcb_onfault;
624 if (setfault(&env)) {
625 curpcb->pcb_onfault = oldfault;
626 return EFAULT;
627 }
628
629 memcpy(dst, src, len);
630
631 curpcb->pcb_onfault = oldfault;
632 return 0;
633 }
634
/*
 * badaddr:
 *
 *	Probe addr for accessibility with a read of the given size
 *	(1, 2, or 4 bytes), discarding the value read.  Returns
 *	non-zero if the access faulted.  Thin wrapper around
 *	badaddr_read().
 */
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}
641
642 int
643 badaddr_read(void *addr, size_t size, int *rptr)
644 {
645 struct faultbuf env;
646 int x;
647
648 /* Get rid of any stale machine checks that have been waiting. */
649 __asm __volatile ("sync; isync");
650
651 if (setfault(&env)) {
652 curpcb->pcb_onfault = 0;
653 __asm __volatile ("sync");
654 return 1;
655 }
656
657 __asm __volatile ("sync");
658
659 switch (size) {
660 case 1:
661 x = *(volatile int8_t *)addr;
662 break;
663 case 2:
664 x = *(volatile int16_t *)addr;
665 break;
666 case 4:
667 x = *(volatile int32_t *)addr;
668 break;
669 default:
670 panic("badaddr: invalid size (%d)", size);
671 }
672
673 /* Make sure we took the machine check, if we caused one. */
674 __asm __volatile ("sync; isync");
675
676 curpcb->pcb_onfault = 0;
677 __asm __volatile ("sync"); /* To be sure. */
678
679 /* Use the value to avoid reorder. */
680 if (rptr)
681 *rptr = x;
682
683 return 0;
684 }
685
686 /*
687 * For now, this only deals with the particular unaligned access case
688 * that gcc tends to generate. Eventually it should handle all of the
689 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
690 */
691
/*
 * Not implemented yet: always report failure, so the EXC_ALI handler
 * in trap() delivers SIGBUS for every unaligned user access.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

	return -1;
}
698
699 /*
700 * Start a new LWP
701 */
702 void
703 startlwp(arg)
704 void *arg;
705 {
706 int err;
707 ucontext_t *uc = arg;
708 struct lwp *l = curlwp;
709
710 err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
711 #if DIAGNOSTIC
712 if (err) {
713 printf("Error %d from cpu_setmcontext.", err);
714 }
715 #endif
716 pool_put(&lwp_uc_pool, uc);
717
718 upcallret(l);
719 }
720
721 /*
722 * XXX This is a terrible name.
723 */
724 void
725 upcallret(l)
726 struct lwp *l;
727 {
728 int sig;
729
730 /* Take pending signals. */
731 while ((sig = CURSIG(l)) != 0)
732 postsig(sig);
733
734 /* Invoke per-process kernel-exit handling, if any */
735 if (l->l_proc->p_userret)
736 (l->l_proc->p_userret)(l, l->l_proc->p_userret_arg);
737
738 /* Invoke any pending upcalls */
739 while (l->l_flag & L_SA_UPCALL)
740 sa_upcall_userret(l);
741
742 curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
743 }
744