/*	$NetBSD: trap.c,v 1.5 2002/06/17 21:08:56 christos Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_systrace.h"
#include "opt_syscall_debug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#ifdef SYSTRACE
#include <sys/systrace.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/tlb.h>
#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else			XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
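/*
 * These match the 32-bit PowerPC SVR4 calling convention: the first
 * eight integer arguments arrive in r3..r10, and any further arguments
 * live on the caller's stack just past the 8-byte frame header
 * (back chain word plus saved LR word), i.e. at sp + 8.
 */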

#ifndef MULTIPROCESSOR
volatile int astpending;
volatile int want_resched;
#endif

void *syscall = NULL;	/* XXX dummy symbol for emul_netbsd */

static int fix_unaligned __P((struct proc *p, struct trapframe *frame));

void trap __P((struct trapframe *));	/* Called from locore / trap_subr */
int setfault __P((faultbuf));		/* defined in locore.S */
/* Why are these not defined in a header? */
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, int *));
int ctx_setup __P((int, int));

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	do { if (trapdebug & (x)) printf y; } while (/*CONSTCOND*/ 0)
#else
#define DBPRINTF(x, y)
#endif
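/*
 * DBPRINTF takes its printf arguments as one parenthesized list, e.g.
 * DBPRINTF(TDB_ALL, ("trap %x\n", type)), so the whole call compiles
 * away to nothing when DEBUG is not defined.
 */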

void
trap(struct trapframe *frame)
{
	struct proc *p = curproc;
	int type = frame->exc;
	int ftype, rv;

	KASSERT(p == 0 || (p->p_stat == SONPROC));

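	/*
	 * PSL_PR set in the saved MSR means the exception came from user
	 * mode; fold EXC_USER into the type so the switch below selects
	 * the user-mode variant of each handler.
	 */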
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %x from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;
			/* DBSR is SPR 0x3f0; SRR2/SRR3 are SPRs 0x3de/0x3df on 4xx. */
			__asm __volatile("mfspr %0,0x3f0;"
			    "mfspr %1,0x3de; mfspr %2,0x3df"
			    : "=r" (rv), "=r" (srr2), "=r" (srr3));
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2, srr3);
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
	case EXC_TRC|EXC_USER:
		KERNEL_PROC_LOCK(p);
		frame->srr1 &= ~PSL_SE;
		trapsignal(p, SIGTRAP, EXC_TRC);
		KERNEL_PROC_UNLOCK(p);
		break;

	/* If we could not find and install appropriate TLB entry, fall through */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		{
			struct vm_map *map;
			vaddr_t va;
			faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dear;
			if (frame->pid == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
			}

			if (frame->esr & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %x %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->esr));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (rv == 0)
				goto done;
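			/*
			 * The fault could not be resolved.  If the faulting
			 * code armed an onfault handler via setfault()
			 * (locore.S), unwind to the state saved there:
			 * (*fb)[0] is the return pc, [1] and [2] are r1/r2,
			 * [3] is the CR and [4] onward holds r13-r31.  r3 is
			 * forced to 1 so setfault() appears to return
			 * non-zero.
			 */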
			if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				    19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		KERNEL_PROC_LOCK(p);

		if (frame->esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %x %s fault on %x %x\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dear, frame->esr));
		KASSERT(p == curproc && (p->p_stat == SONPROC));
		rv = uvm_fault(&p->p_vmspace->vm_map,
		    trunc_page(frame->dear), 0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			trapsignal(p, SIGKILL, EXC_DSI);
		} else {
			trapsignal(p, SIGSEGV, EXC_DSI);
		}
		KERNEL_PROC_UNLOCK(p);
		break;
	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		KERNEL_PROC_LOCK(p);
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %x %s fault on %x tf %p\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		trapsignal(p, SIGSEGV, EXC_ISI);
		KERNEL_PROC_UNLOCK(p);
		break;
	case EXC_SC|EXC_USER:
		{
			const struct sysent *callp;
			size_t argsize;
			register_t code, error;
			register_t *params, rval[2];
			int n;
			register_t args[10];

			KERNEL_PROC_LOCK(p);

			uvmexp.syscalls++;

			code = frame->fixreg[0];
			callp = p->p_emul->e_sysent;
			params = frame->fixreg + FIRSTARG;
			n = NARGREG;

			switch (code) {
			case SYS_syscall:
				/*
				 * code is first argument,
				 * followed by actual args.
				 */
				code = *params++;
				n -= 1;
				break;
			case SYS___syscall:
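				/*
				 * Like SYS_syscall, but the code is passed
				 * as a 64-bit quad; on this 32-bit
				 * big-endian kernel the value is in the
				 * low-order word, so skip the high word.
				 */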
				params++;
				code = *params++;
				n -= 2;
				break;
			default:
				break;
			}

			code &= (SYS_NSYSENT - 1);
			callp += code;
			argsize = callp->sy_argsize;

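			/*
			 * If the syscall takes more arguments than fit in
			 * the registers, gather the register args into
			 * args[] and copyin() the rest from the user stack
			 * just past the frame header (see MOREARGS).
			 */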
			if (argsize > n * sizeof(register_t)) {
				memcpy(args, params, n * sizeof(register_t));
				error = copyin(MOREARGS(frame->fixreg[1]),
				    args + n,
				    argsize - n * sizeof(register_t));
				if (error)
					goto syscall_bad;
				params = args;
			}

			if ((error = trace_enter(p, code, params, rval)) != 0)
				goto syscall_bad;

			rval[0] = 0;
			rval[1] = 0;

			error = (*callp->sy_call)(p, params, rval);
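			/*
			 * PowerPC syscall return convention: results come
			 * back in r3/r4, and an error is flagged by setting
			 * the CR0 summary-overflow bit (0x10000000 in the
			 * saved CR), which the userland syscall stubs test.
			 */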
			switch (error) {
			case 0:
				frame->fixreg[FIRSTARG] = rval[0];
				frame->fixreg[FIRSTARG + 1] = rval[1];
				frame->cr &= ~0x10000000;
				break;
			case ERESTART:
				/*
				 * Set user's pc back to redo the system call.
				 */
				frame->srr0 -= 4;
				break;
			case EJUSTRETURN:
				/* nothing to do */
				break;
			default:
			syscall_bad:
				if (p->p_emul->e_errno)
					error = p->p_emul->e_errno[error];
				frame->fixreg[FIRSTARG] = error;
				frame->cr |= 0x10000000;
				break;
			}
			KERNEL_PROC_UNLOCK(p);

			trace_exit(p, code, args, rval, error);
		}
		break;

	case EXC_AST|EXC_USER:
		astpending = 0;		/* we are about to do it */
		KERNEL_PROC_LOCK(p);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (want_resched)
			preempt(NULL);
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_ALI|EXC_USER:
		KERNEL_PROC_LOCK(p);
		if (fix_unaligned(p, frame) != 0)
			trapsignal(p, SIGBUS, EXC_ALI);
		else
			frame->srr0 += 4;
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps++;
		if (!(p->p_addr->u_pcb.pcb_flags & PCB_FPU)) {
			memset(&p->p_addr->u_pcb.pcb_fpu, 0,
			    sizeof p->p_addr->u_pcb.pcb_fpu);
			p->p_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		if ((rv = fpu_emulate(frame,
		    (struct fpreg *)&p->p_addr->u_pcb.pcb_fpu))) {
			KERNEL_PROC_LOCK(p);
			trapsignal(p, rv, EXC_PGM);
			KERNEL_PROC_UNLOCK(p);
		}
		break;

	case EXC_MCHK:
		{
			faultbuf *fb;

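			/*
			 * Machine check in kernel mode: if an onfault
			 * handler is armed (badaddr_read() and
			 * copyin()/copyout() use setfault()), unwind to it
			 * just like the kernel DSI case above; otherwise
			 * the fault is fatal.
			 */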
			if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				    19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;
	default:
	brain_damage:
		printf("trap type 0x%x at 0x%x\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Take pending signals. */
	{
		int sig;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
 done:
	return;
}

int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc((struct pmap *)pm);
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;
			/*
			 * Set the Internal Debug and
			 * Instruction Completion bits of
			 * the DBCR0 register.
			 *
			 * XXX this is also used by jtag debuggers...
			 */
			__asm __volatile("mfspr %0,0x3f2;"
			    "or %0,%0,%1;"
			    "mtspr 0x3f2,%0;" :
			    "=&r" (dbreg) : "r" (mask));
		}
	} else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

void
child_return(void *arg)
{
	struct proc *p = arg;
	struct trapframe *tf = trapframe(p);

	KERNEL_PROC_UNLOCK(p);

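	/*
	 * Fix up the child's syscall return values for fork(): r3 = 0 and
	 * r4 = 1 (the "this is the child" flag), with CR0[SO] cleared to
	 * indicate success.
	 */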
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 1;
	tf->cr &= ~0x10000000;
	tf->srr1 &= ~(PSL_FP|PSL_VEC);	/* Disable FP & AltiVec, as we can't be using them */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_PROC_LOCK(p);
		ktrsysret(p, SYS_fork, 0, 0);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
	/* Profiling?							XXX */
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
extern void vunmaprange __P((vaddr_t, vsize_t));
static int bigcopyin __P((const void *, void *, size_t));
static int bigcopyout __P((const void *, void *, size_t));

int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256)
		return (bigcopyin(udaddr, kaddr, len));

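	/*
	 * setfault() (locore.S) records env in curpcb->pcb_onfault and
	 * returns 0; if the copy below faults, trap() unwinds back to the
	 * saved state and setfault() appears to return non-zero, so we end
	 * up here returning EFAULT.
	 */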
	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

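	/*
	 * The 4xx MMU sees only one address space (PID) at a time, so copy
	 * one byte per iteration: switch the PID register to the user
	 * context to load the byte, then back to the kernel PID to store
	 * it.  Instruction translation is disabled for the duration so
	 * instruction fetches are not disturbed by the PID switching.
	 */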
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
	    "mfmsr %0;"				/* Save MSR */
	    "li %1,0x20; "
	    "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
	    "mfpid %1;"				/* Save old PID */
	    "sync; isync;"

	    "1: bdz 2f;"			/* while len */
	    "mtpid %3; sync;"			/* Load user ctx */
	    "lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
	    "sync; isync;"
	    "mtpid %1; sync;"
	    "stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;"	/* Store kernel byte */
	    "sync; isync;"
	    "b 1b;"				/* repeat */

	    "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
	    "sync; isync;"
	    : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
	    : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}

static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct proc *p = curproc;
	int error;

	if (!p) {
		return EFAULT;
	}

	/*
	 * Stolen from physio():
	 */
	PHOLD(p);
	error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
	if (error) {
		PRELE(p);
		return EFAULT;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

	memcpy(kp, up, len);
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p, (caddr_t)udaddr, len);
	PRELE(p);

	return 0;
}

int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256)
		return (bigcopyout(kaddr, udaddr, len));

	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

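	/*
	 * Same PID-switching technique as copyin(), in the other
	 * direction: load each byte under the saved kernel PID, then
	 * switch to the user context to store it (flushing the line
	 * with dcbf).
	 */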
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
	    "mfmsr %0;"				/* Save MSR */
	    "li %1,0x20; "
	    "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
	    "mfpid %1;"				/* Save old PID */
	    "sync; isync;"

	    "1: bdz 2f;"			/* while len */
	    "mtpid %1; sync;"
	    "lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
	    "sync; isync;"
	    "mtpid %3; sync;"			/* Load user ctx */
	    "stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;"	/* Store user byte */
	    "sync; isync;"
	    "b 1b;"				/* repeat */

	    "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
	    "sync; isync;"
	    : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
	    : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}

static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
	char *up;
	const char *kp = (char *)kaddr;
	struct proc *p = curproc;
	int error;

	if (!p) {
		return EFAULT;
	}

	/*
	 * Stolen from physio():
	 */
	PHOLD(p);
	error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
	if (error) {
		PRELE(p);
		return EFAULT;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
	    VM_PROT_READ|VM_PROT_WRITE);

	memcpy(up, kp, len);
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p, udaddr, len);
	PRELE(p);

	return 0;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	oldfault = curpcb->pcb_onfault;
	if (setfault(env)) {
		curpcb->pcb_onfault = oldfault;
		return EFAULT;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	curpcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct proc *p, struct trapframe *frame)
{

	return -1;
}