/*	$NetBSD: trap.c,v 1.2.6.8 2002/07/12 01:39:45 nathanw Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_systrace.h"
#include "opt_syscall_debug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/pool.h>
#include <sys/sa.h>
#include <sys/savar.h>
#ifdef SYSTRACE
#include <sys/systrace.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/tlb.h>
#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else		XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */

#ifndef MULTIPROCESSOR
volatile int astpending;
volatile int want_resched;
#endif

void *syscall = NULL;	/* XXX dummy symbol for emul_netbsd */

static int fix_unaligned __P((struct lwp *p, struct trapframe *frame));

void trap __P((struct trapframe *));	/* Called from locore / trap_subr */
int setfault __P((faultbuf));		/* defined in locore.S */
/* Why are these not defined in a header? */
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, int *));
int ctx_setup __P((int, int));

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

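/*
 * trap() is the common handler for every exception that locore's trap_subr
 * reroutes into C.  Exceptions taken from user mode have EXC_USER or'ed
 * into the type; those are turned into page faults, signals or system
 * calls and fall through to the user-return code at the bottom of the
 * function.  Kernel-mode faults either recover through the pcb_onfault
 * mechanism or end up at brain_damage and panic.
 */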
void
trap(struct trapframe *frame)
{
        struct lwp *l = curlwp;
        struct proc *p = l ? l->l_proc : NULL;
        int type = frame->exc;
        int ftype, rv;

        KASSERT(l == 0 || (l->l_stat == LSONPROC));

        if (frame->srr1 & PSL_PR)
                type |= EXC_USER;

        ftype = VM_PROT_READ;

        DBPRINTF(TDB_ALL, ("trap(%x) at %x from frame %p &frame %p\n",
            type, frame->srr0, frame, &frame));

        switch (type) {
        case EXC_DEBUG|EXC_USER:
                {
                        int srr2, srr3;

                        /*
                         * Read DBSR plus the 4xx save/restore registers
                         * 2 and 3 so the values printed below are real
                         * rather than uninitialized stack contents.
                         */
                        __asm __volatile("mfspr %0,0x3f0;"
                            "mfspr %1,0x3de; mfspr %2,0x3df" :
                            "=r" (rv), "=r" (srr2), "=r" (srr3));
                        printf("debug reg is %x srr2 %x srr3 %x\n",
                            rv, srr2, srr3);
                }
                /*
                 * DEBUG intr -- probably single-step; fall through to the
                 * trace handler.
                 */
        case EXC_TRC|EXC_USER:
                KERNEL_PROC_LOCK(l);
                frame->srr1 &= ~PSL_SE;
                trapsignal(l, SIGTRAP, EXC_TRC);
                KERNEL_PROC_UNLOCK(l);
                break;

        /*
         * If the TLB miss handler in locore could not find and install an
         * appropriate TLB entry, it falls through to here.
         */
        case EXC_DSI:
                /* FALLTHROUGH */
        case EXC_DTMISS:
                {
                        struct vm_map *map;
                        vaddr_t va;
                        faultbuf *fb = NULL;

                        KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
                        va = frame->dear;
                        if (frame->pid == KERNEL_PID) {
                                map = kernel_map;
                        } else {
                                map = &p->p_vmspace->vm_map;
                        }

                        if (frame->esr & (ESR_DST|ESR_DIZ))
                                ftype = VM_PROT_WRITE;

                        DBPRINTF(TDB_ALL,
                            ("trap(EXC_DSI) at %x %s fault on %p esr %x\n",
                            frame->srr0,
                            (ftype & VM_PROT_WRITE) ? "write" : "read",
                            (void *)va, frame->esr));
                        rv = uvm_fault(map, trunc_page(va), 0, ftype);
                        KERNEL_UNLOCK();
                        if (rv == 0)
                                goto done;
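                        /*
                         * The fault was not resolved.  If the faulting
                         * access came from a routine that registered a
                         * fault handler with setfault(), unwind to it:
                         * the faultbuf holds the saved pc, sp, r2, cr and
                         * r13-r31 in that order (see setfault() in
                         * locore.S), and fixreg[3] is set to 1 so the
                         * interrupted setfault() call appears to return
                         * "fault taken".
                         */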
                        if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
                                frame->pid = KERNEL_PID;
                                frame->srr0 = (*fb)[0];
                                frame->srr1 |= PSL_IR; /* Re-enable IMMU */
                                frame->fixreg[1] = (*fb)[1];
                                frame->fixreg[2] = (*fb)[2];
                                frame->fixreg[3] = 1; /* Return TRUE */
                                frame->cr = (*fb)[3];
                                memcpy(&frame->fixreg[13], &(*fb)[4],
                                    19 * sizeof(register_t));
                                goto done;
                        }
                }
                goto brain_damage;

        case EXC_DSI|EXC_USER:
                /* FALLTHROUGH */
        case EXC_DTMISS|EXC_USER:
                KERNEL_PROC_LOCK(l);

                if (frame->esr & (ESR_DST|ESR_DIZ))
                        ftype = VM_PROT_WRITE;

                DBPRINTF(TDB_ALL,
                    ("trap(EXC_DSI|EXC_USER) at %x %s fault on %x %x\n",
                    frame->srr0,
                    (ftype & VM_PROT_WRITE) ? "write" : "read",
                    frame->dear, frame->esr));
                KASSERT(l == curlwp && (l->l_stat == LSONPROC));
                rv = uvm_fault(&p->p_vmspace->vm_map,
                    trunc_page(frame->dear), 0, ftype);
                if (rv == 0) {
                        KERNEL_PROC_UNLOCK(l);
                        break;
                }
                if (rv == ENOMEM) {
                        printf("UVM: pid %d (%s) lid %d, uid %d killed: "
                            "out of swap\n",
                            p->p_pid, p->p_comm, l->l_lid,
                            p->p_cred && p->p_ucred ?
                            p->p_ucred->cr_uid : -1);
                        trapsignal(l, SIGKILL, EXC_DSI);
                } else {
                        trapsignal(l, SIGSEGV, EXC_DSI);
                }
                KERNEL_PROC_UNLOCK(l);
                break;

        case EXC_ITMISS|EXC_USER:
        case EXC_ISI|EXC_USER:
                KERNEL_PROC_LOCK(l);
                ftype = VM_PROT_READ | VM_PROT_EXECUTE;
                DBPRINTF(TDB_ALL,
                    ("trap(EXC_ISI|EXC_USER) at %x %s fault on %x tf %p\n",
                    frame->srr0,
                    (ftype & VM_PROT_WRITE) ? "write" : "read",
                    frame->srr0, frame));
                rv = uvm_fault(&p->p_vmspace->vm_map,
                    trunc_page(frame->srr0), 0, ftype);
                if (rv == 0) {
                        KERNEL_PROC_UNLOCK(l);
                        break;
                }
                trapsignal(l, SIGSEGV, EXC_ISI);
                KERNEL_PROC_UNLOCK(l);
                break;
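
        /*
         * System call.  The syscall number arrives in r0 and up to
         * NARGREG (8) arguments in r3-r10; any further arguments were
         * spilled by the user's calling sequence onto its stack at
         * MOREARGS(sp) and are fetched with copyin().  SYS_syscall and
         * SYS___syscall are the indirect-syscall entry points, which
         * shift the real syscall number out of the argument list.
         */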
        case EXC_SC|EXC_USER:
                {
                        const struct sysent *callp;
                        size_t argsize;
                        register_t code, error;
                        register_t *params, rval[2];
                        int n;
                        register_t args[10];

                        KERNEL_PROC_LOCK(l);

                        uvmexp.syscalls++;

                        code = frame->fixreg[0];
                        callp = p->p_emul->e_sysent;
                        params = frame->fixreg + FIRSTARG;
                        n = NARGREG;

                        switch (code) {
                        case SYS_syscall:
                                /*
                                 * code is first argument,
                                 * followed by actual args.
                                 */
                                code = *params++;
                                n -= 1;
                                break;
                        case SYS___syscall:
                                params++;
                                code = *params++;
                                n -= 2;
                                break;
                        default:
                                break;
                        }

                        code &= (SYS_NSYSENT - 1);
                        callp += code;
                        argsize = callp->sy_argsize;

                        if (argsize > n * sizeof(register_t)) {
                                memcpy(args, params, n * sizeof(register_t));
                                error = copyin(MOREARGS(frame->fixreg[1]),
                                    args + n,
                                    argsize - n * sizeof(register_t));
                                if (error)
                                        goto syscall_bad;
                                params = args;
                        }

                        if ((error = trace_enter(p, code, params, rval)) != 0)
                                goto syscall_bad;

                        rval[0] = 0;
                        rval[1] = 0;

                        error = (*callp->sy_call)(l, params, rval);
                        switch (error) {
                        case 0:
                                frame->fixreg[FIRSTARG] = rval[0];
                                frame->fixreg[FIRSTARG + 1] = rval[1];
                                frame->cr &= ~0x10000000;
                                break;
                        case ERESTART:
                                /*
                                 * Set user's pc back to redo the system call.
                                 */
                                frame->srr0 -= 4;
                                break;
                        case EJUSTRETURN:
                                /* nothing to do */
                                break;
                        default:
                        syscall_bad:
                                if (p->p_emul->e_errno)
                                        error = p->p_emul->e_errno[error];
                                frame->fixreg[FIRSTARG] = error;
                                frame->cr |= 0x10000000;
                                break;
                        }
                        KERNEL_PROC_UNLOCK(l);

                        trace_exit(p, code, args, rval, error);
                }
                break;

        case EXC_AST|EXC_USER:
                astpending = 0;		/* we are about to do it */
                KERNEL_PROC_LOCK(l);
                uvmexp.softs++;
                if (p->p_flag & P_OWEUPC) {
                        p->p_flag &= ~P_OWEUPC;
                        ADDUPROF(p);
                }
                /* Check whether we are being preempted. */
                if (want_resched)
                        preempt(NULL);
                KERNEL_PROC_UNLOCK(l);
                break;

        case EXC_ALI|EXC_USER:
                KERNEL_PROC_LOCK(l);
                if (fix_unaligned(l, frame) != 0)
                        trapsignal(l, SIGBUS, EXC_ALI);
                else
                        frame->srr0 += 4;
                KERNEL_PROC_UNLOCK(l);
                break;

        case EXC_PGM|EXC_USER:
                /*
                 * Illegal instruction: see whether it is a floating-point
                 * instruction that can be emulated in software.
                 */
                uvmexp.traps++;
                if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
                        memset(&l->l_addr->u_pcb.pcb_fpu, 0,
                            sizeof l->l_addr->u_pcb.pcb_fpu);
                        l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
                }

                if ((rv = fpu_emulate(frame,
                    (struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
                        KERNEL_PROC_LOCK(l);
                        trapsignal(l, rv, EXC_PGM);
                        KERNEL_PROC_UNLOCK(l);
                }
                break;

        case EXC_MCHK:
                {
                        faultbuf *fb;

                        if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
                                frame->pid = KERNEL_PID;
                                frame->srr0 = (*fb)[0];
                                frame->srr1 |= PSL_IR; /* Re-enable IMMU */
                                frame->fixreg[1] = (*fb)[1];
                                frame->fixreg[2] = (*fb)[2];
                                frame->fixreg[3] = 1; /* Return TRUE */
                                frame->cr = (*fb)[3];
                                memcpy(&frame->fixreg[13], &(*fb)[4],
                                    19 * sizeof(register_t));
                                goto done;
                        }
                }
                goto brain_damage;

        default:
        brain_damage:
                printf("trap type 0x%x at 0x%x\n", type, frame->srr0);
#ifdef DDB
                if (kdb_trap(type, frame))
                        goto done;
#endif
#ifdef TRAP_PANICWAIT
                printf("Press a key to panic.\n");
                cngetc();
#endif
                panic("trap");
        }

        /* Take pending signals. */
        {
                int sig;

                while ((sig = CURSIG(l)) != 0)
                        postsig(sig);
        }

        /* Invoke per-process kernel-exit handling, if any */
        if (p->p_userret)
                (p->p_userret)(l, p->p_userret_arg);

        /* Invoke any pending upcalls */
        if (l->l_flag & L_SA_UPCALL)
                sa_upcall_userret(l);

        curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
        return;
}

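/*
 * ctx_setup() chooses the MMU context (TLB PID) to resume with,
 * presumably on the exception-return path in locore (it is declared
 * above for locore's benefit).  When returning to user mode it makes
 * sure the process' pmap owns a context, allocating one if needed;
 * when returning to kernel mode with no context it falls back to
 * KERNEL_PID.  If the saved MSR shows single-stepping (PSL_SE), the
 * DBCR0 debug bits are set so the step trap is re-armed.
 */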
int
ctx_setup(int ctx, int srr1)
{
        volatile struct pmap *pm;

        /* Update PID if we're returning to user mode. */
        if (srr1 & PSL_PR) {
                pm = curproc->p_vmspace->vm_map.pmap;
                if (!pm->pm_ctx) {
                        ctx_alloc((struct pmap *)pm);
                }
                ctx = pm->pm_ctx;
                if (srr1 & PSL_SE) {
                        int dbreg, mask = 0x48000000;
                        /*
                         * Set the Internal Debug and
                         * Instruction Completion bits of
                         * the DBCR0 register.
                         *
                         * XXX this is also used by jtag debuggers...
                         */
                        __asm __volatile("mfspr %0,0x3f2;"
                            "or %0,%0,%1;"
                            "mtspr 0x3f2,%0;" :
                            "=&r" (dbreg) : "r" (mask));
                }
        } else if (!ctx) {
                ctx = KERNEL_PID;
        }
        return (ctx);
}

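/*
 * child_return() finishes fork() on behalf of the new child: the pending
 * system call is made to return 0 with the "child" indicator (r4 = 1) and
 * the error bit in CR0 cleared, and the FP/AltiVec state the child cannot
 * have touched yet is disabled.
 */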
void
child_return(void *arg)
{
        struct lwp *l = arg;
        struct proc *p = l->l_proc;
        struct trapframe *tf = trapframe(l);

        KERNEL_PROC_UNLOCK(l);

        tf->fixreg[FIRSTARG] = 0;
        tf->fixreg[FIRSTARG + 1] = 1;
        tf->cr &= ~0x10000000;
        /* Disable FP & AltiVec, as the child can't have used them yet. */
        tf->srr1 &= ~(PSL_FP|PSL_VEC);
#ifdef KTRACE
        if (KTRPOINT(p, KTR_SYSRET)) {
                KERNEL_PROC_LOCK(l);
                ktrsysret(p, SYS_fork, 0, 0);
                KERNEL_PROC_UNLOCK(l);
        }
#endif
        /* Profiling?							XXX */
        curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
extern void vunmaprange __P((vaddr_t, vsize_t));
static int bigcopyin __P((const void *, void *, size_t));
static int bigcopyout __P((const void *, void *, size_t));

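/*
 * copyin() -- copy bytes from user space into the kernel.  The kernel and
 * user address spaces live in separate TLB contexts (PIDs), so short
 * copies are done a byte at a time, switching the PID between the user
 * context and the kernel context around each access under the protection
 * of a setfault() handler.  Copies larger than 256 bytes go through
 * bigcopyin(), which maps the user range into kernel VA instead.
 */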
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
        struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
        int msr, pid, tmp, ctx;
        faultbuf env;

        /* For bigger buffers use the faster copy */
        if (len > 256)
                return (bigcopyin(udaddr, kaddr, len));

        if (setfault(env)) {
                curpcb->pcb_onfault = 0;
                return EFAULT;
        }

        if (!(ctx = pm->pm_ctx)) {
                /* No context -- assign it one */
                ctx_alloc(pm);
                ctx = pm->pm_ctx;
        }

        asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
            "mfmsr %0;"				/* Save MSR */
            "li %1,0x20; "
            "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
            "mfpid %1;"				/* Save old PID */
            "sync; isync;"

            "1: bdz 2f;"			/* while len */
            "mtpid %3; sync;"			/* Load user ctx */
            "lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
            "sync; isync;"
            "mtpid %1; sync;"
            "stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;" /* Store kernel byte */
            "sync; isync;"
            "b 1b;"				/* repeat */

            "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
            "sync; isync;"
            : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
            : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

        curpcb->pcb_onfault = 0;
        return 0;
}

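/*
 * bigcopyin() handles large copyin()s: borrowing the approach used by
 * physio(), it wires the user buffer, maps the whole range into kernel
 * virtual address space with vmaprange(), and then does a single memcpy()
 * instead of switching TLB contexts on every byte.
 */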
static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
        const char *up;
        char *kp = kaddr;
        struct lwp *l = curlwp;
        struct proc *p;
        int error;

        if (!l) {
                return EFAULT;
        }

        p = l->l_proc;

        /*
         * Stolen from physio():
         */
        PHOLD(l);
        error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
        if (error) {
                PRELE(l);
                return EFAULT;
        }
        up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

        memcpy(kp, up, len);
        vunmaprange((vaddr_t)up, len);
        uvm_vsunlock(p, (caddr_t)udaddr, len);
        PRELE(l);

        return 0;
}

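/*
 * copyout() -- copy bytes from the kernel into user space.  The structure
 * mirrors copyin(): short copies switch the TLB PID around each byte under
 * a setfault() handler, while copies larger than 256 bytes are handed to
 * bigcopyout().
 */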
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
        struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
        int msr, pid, tmp, ctx;
        faultbuf env;

        /* For big copies use more efficient routine */
        if (len > 256)
                return (bigcopyout(kaddr, udaddr, len));

        if (setfault(env)) {
                curpcb->pcb_onfault = 0;
                return EFAULT;
        }

        if (!(ctx = pm->pm_ctx)) {
                /* No context -- assign it one */
                ctx_alloc(pm);
                ctx = pm->pm_ctx;
        }

        asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
            "mfmsr %0;"				/* Save MSR */
            "li %1,0x20; "
            "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
            "mfpid %1;"				/* Save old PID */
            "sync; isync;"

            "1: bdz 2f;"			/* while len */
            "mtpid %1; sync;"
            "lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
            "sync; isync;"
            "mtpid %3; sync;"			/* Load user ctx */
            "stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;" /* Store user byte */
            "sync; isync;"
            "b 1b;"				/* repeat */

            "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
            "sync; isync;"
            : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
            : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

        curpcb->pcb_onfault = 0;
        return 0;
}

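/*
 * bigcopyout() is the large-buffer counterpart of bigcopyin(): the user
 * destination is wired and mapped into kernel VA, filled with a single
 * memcpy(), then unmapped and unwired again.
 */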
static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
        char *up;
        const char *kp = kaddr;
        struct lwp *l = curlwp;
        struct proc *p;
        int error;

        if (!l) {
                return EFAULT;
        }

        p = l->l_proc;

        /*
         * Stolen from physio():
         */
        PHOLD(l);
        error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
        if (error) {
                PRELE(l);
                return EFAULT;
        }
        up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
            VM_PROT_READ|VM_PROT_WRITE);

        memcpy(up, kp, len);
        vunmaprange((vaddr_t)up, len);
        uvm_vsunlock(p, udaddr, len);
        PRELE(l);

        return 0;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
        faultbuf env, *oldfault;

        oldfault = curpcb->pcb_onfault;
        if (setfault(env)) {
                curpcb->pcb_onfault = oldfault;
                return EFAULT;
        }

        memcpy(dst, src, len);

        curpcb->pcb_onfault = oldfault;
        return 0;
}

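/*
 * badaddr() / badaddr_read() probe an address for validity: an access of
 * the requested width is performed under a setfault() handler, bracketed
 * by sync/isync so that any machine check is taken here rather than later.
 * A nonzero return means the address is bad; otherwise the value read is
 * optionally stored through rptr.
 */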
int
badaddr(void *addr, size_t size)
{

        return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
        faultbuf env;
        int x;

        /* Get rid of any stale machine checks that have been waiting. */
        __asm __volatile ("sync; isync");

        if (setfault(env)) {
                curpcb->pcb_onfault = 0;
                __asm __volatile ("sync");
                return 1;
        }

        __asm __volatile ("sync");

        switch (size) {
        case 1:
                x = *(volatile int8_t *)addr;
                break;
        case 2:
                x = *(volatile int16_t *)addr;
                break;
        case 4:
                x = *(volatile int32_t *)addr;
                break;
        default:
                panic("badaddr: invalid size (%d)", size);
        }

        /* Make sure we took the machine check, if we caused one. */
        __asm __volatile ("sync; isync");

        curpcb->pcb_onfault = 0;
        __asm __volatile ("sync");	/* To be sure. */

        /* Use the value to avoid reorder. */
        if (rptr)
                *rptr = x;

        return 0;
}

/*
 * For now, this does not fix anything: it always returns -1, so unaligned
 * accesses from user mode simply get SIGBUS.  Eventually it should handle
 * at least the particular unaligned access patterns that gcc tends to
 * generate, and ideally everything a 32-bit PowerPC in big-endian mode
 * can produce.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

        return -1;
}

/*
 * Start a new LWP
 */
void
startlwp(void *arg)
{
        int err;
        ucontext_t *uc = arg;
        struct lwp *l = curlwp;

        err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
#ifdef DIAGNOSTIC
        if (err) {
                printf("Error %d from cpu_setmcontext.\n", err);
        }
#endif
        pool_put(&lwp_uc_pool, uc);

        upcallret(l);
}

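/*
 * upcallret() is the common "return to user mode" tail used by startlwp()
 * and, judging by the name, the scheduler-activations upcall path: it
 * delivers pending signals, exits if the process is being torn down, runs
 * any pending upcalls and refreshes the scheduling priority.
 */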
/*
 * XXX This is a terrible name.
 */
void
upcallret(void *arg)
{
        struct lwp *l = curlwp;
        int sig;

        /* Take pending signals. */
        while ((sig = CURSIG(l)) != 0)
                postsig(sig);

        /* If our process is on the way out, die. */
        if (l->l_proc->p_flag & P_WEXIT)
                lwp_exit(l);

        /* Invoke any pending upcalls */
        if (l->l_flag & L_SA_UPCALL)
                sa_upcall_userret(l);

        curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
}