/*	$NetBSD: trap.c,v 1.42 2007/05/22 20:06:33 rjs Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.42 2007/05/22 20:06:33 rjs Exp $");
71
72 #include "opt_altivec.h"
73 #include "opt_ddb.h"
74
75 #include <sys/param.h>
76 #include <sys/proc.h>
77 #include <sys/reboot.h>
78 #include <sys/syscall.h>
79 #include <sys/systm.h>
80 #include <sys/user.h>
81 #include <sys/pool.h>
82 #include <sys/userret.h>
83 #include <sys/kauth.h>
84
85 #include <uvm/uvm_extern.h>
86
87 #include <dev/cons.h>
88
89 #include <machine/cpu.h>
90 #include <machine/db_machdep.h>
91 #include <machine/fpu.h>
92 #include <machine/frame.h>
93 #include <machine/pcb.h>
94 #include <machine/psl.h>
95 #include <machine/trap.h>
96
97 #include <powerpc/spr.h>
98 #include <powerpc/ibm4xx/pmap.h>
99 #include <powerpc/ibm4xx/tlb.h>
100 #include <powerpc/fpu/fpu_extern.h>
101
102 /* These definitions should probably be somewhere else XXX */
103 #define FIRSTARG 3 /* first argument is in reg 3 */
104 #define NARGREG 8 /* 8 args are in registers */
105 #define MOREARGS(sp) ((void *)((int)(sp) + 8)) /* more args go here */
106
107 static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));
108
109 void trap __P((struct trapframe *)); /* Called from locore / trap_subr */
110 /* Why are these not defined in a header? */
111 int badaddr __P((void *, size_t));
112 int badaddr_read __P((void *, size_t, int *));
113 int ctx_setup __P((int, int));
114
115 #ifdef DEBUG
116 #define TDB_ALL 0x1
117 int trapdebug = /* TDB_ALL */ 0;
118 #define DBPRINTF(x, y) if (trapdebug & (x)) printf y
119 #else
120 #define DBPRINTF(x, y)
121 #endif
122
/*
 * trap --
 *	Common trap/exception dispatcher, entered from the locore trap
 *	stubs with a fully built trapframe.  Traps taken from user mode
 *	(PSL_PR set in SRR1) are tagged with EXC_USER and either handled
 *	(page fault, alignment, FPU emulation, AST) or turned into a
 *	signal; unhandled kernel-mode faults try the pcb_onfault recovery
 *	path and otherwise panic.
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	int type = frame->exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l == 0 || (l->l_stat == LSONPROC));

	/* User-mode traps are distinguished by EXC_USER in the switch. */
	if (frame->srr1 & PSL_PR) {
		LWP_CACHE_CREDS(l, p);
		type |= EXC_USER;
	}

	/* Default fault type for the data-fault cases below. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;

			/*
			 * NOTE(review): this asm writes only %0 (rv); srr2
			 * and srr3 are listed as outputs but never set, so
			 * the printf below shows garbage for them -- confirm
			 * the intended SPR reads.
			 */
			__asm volatile("mfspr %0,0x3f0" :
			    "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
			    srr3);
			/* XXX fall through or break here?! */
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
	case EXC_TRC|EXC_USER:
		/* Clear single-step enable and deliver SIGTRAP. */
		frame->srr1 &= ~PSL_SE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)frame->srr0;
		KERNEL_LOCK(1, l);
		trapsignal(l, &ksi);
		KERNEL_UNLOCK_LAST(l);
		break;

	/*
	 * If we could not find and install appropriate TLB entry, fall through.
	 */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		{
			struct vm_map *map;
			vaddr_t va;
			struct faultbuf *fb = NULL;

			KERNEL_LOCK(1, NULL);
			va = frame->dar;
			/*
			 * The TLB-miss PID in the frame tells us whether the
			 * faulting access was against the kernel map or the
			 * current process's map.
			 */
			if (frame->tf_xtra[TF_PID] == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
			}

			/* Store/zone faults in the ESR mean a write fault. */
			if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->tf_xtra[TF_ESR]));
			rv = uvm_fault(map, trunc_page(va), ftype);
			KERNEL_UNLOCK_ONE(NULL);
			if (rv == 0)
				goto done;
			/*
			 * Fault not resolved: if a copyin/copyout-style
			 * on-fault handler is armed, unwind to it with a
			 * failure return instead of panicking.
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/* User-mode data fault: fault in the page or signal. */
		KERNEL_LOCK(1, l);

		if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dar, frame->tf_xtra[TF_ESR]));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
		    ftype);
		if (rv == 0) {
			KERNEL_UNLOCK_LAST(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)frame->dar;
		/* Out of swap is fatal for the process, not just the access. */
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		trapsignal(l, &ksi);
		KERNEL_UNLOCK_LAST(l);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/* User-mode instruction fetch fault: fault in or SIGSEGV. */
		KERNEL_LOCK(1, l);
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    ftype);
		if (rv == 0) {
			KERNEL_UNLOCK_LAST(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)frame->srr0;
		ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		trapsignal(l, &ksi);
		KERNEL_UNLOCK_LAST(l);
		break;

	case EXC_AST|EXC_USER:
		/* Asynchronous system trap: profiling ticks and preemption. */
		curcpu()->ci_astpending = 0;	/* we are about to do it */
		uvmexp.softs++;
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		/* Check whether we are being preempted. */
		if (curcpu()->ci_want_resched)
			preempt();
		break;


	case EXC_ALI|EXC_USER:
		/* Alignment fault: try to fix up, else SIGBUS. */
		KERNEL_LOCK(1, l);
		if (fix_unaligned(l, frame) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)frame->dar;
			trapsignal(l, &ksi);
		} else
			/* Fixed up: skip the faulting instruction. */
			frame->srr0 += 4;
		KERNEL_UNLOCK_LAST(l);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps++;
		/* First FPU use by this lwp: start from a clean FPU state. */
		if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
			memset(&l->l_addr->u_pcb.pcb_fpu, 0,
			    sizeof l->l_addr->u_pcb.pcb_fpu);
			l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		/* fpu_emulate() returns 0 on success or a signal number. */
		if ((rv = fpu_emulate(frame,
		    (struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = rv;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)frame->srr0;
			KERNEL_LOCK(1, l);
			trapsignal(l, &ksi);
			KERNEL_UNLOCK_LAST(l);
		}
		break;

	case EXC_MCHK:
		{
			struct faultbuf *fb;

			/*
			 * Kernel machine check (e.g. from badaddr_read()):
			 * recover through the on-fault handler if one is
			 * armed, otherwise fall through to panic.
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;
	default:
 brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke MI userret code */
	mi_userret(l);

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
	return;
}
363
364 int
365 ctx_setup(int ctx, int srr1)
366 {
367 volatile struct pmap *pm;
368
369 /* Update PID if we're returning to user mode. */
370 if (srr1 & PSL_PR) {
371 pm = curproc->p_vmspace->vm_map.pmap;
372 if (!pm->pm_ctx) {
373 ctx_alloc(__UNVOLATILE(pm));
374 }
375 ctx = pm->pm_ctx;
376 if (srr1 & PSL_SE) {
377 int dbreg, mask = 0x48000000;
378 /*
379 * Set the Internal Debug and
380 * Instruction Completion bits of
381 * the DBCR0 register.
382 *
383 * XXX this is also used by jtag debuggers...
384 */
385 __asm volatile("mfspr %0,0x3f2;"
386 "or %0,%0,%1;"
387 "mtspr 0x3f2,%0;" :
388 "=&r" (dbreg) : "r" (mask));
389 }
390 }
391 else if (!ctx) {
392 ctx = KERNEL_PID;
393 }
394 return (ctx);
395 }
396
397 /*
398 * Used by copyin()/copyout()
399 */
400 extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
401 extern void vunmaprange __P((vaddr_t, vsize_t));
402 static int bigcopyin __P((const void *, void *, size_t ));
403 static int bigcopyout __P((const void *, void *, size_t ));
404
/*
 * Copy len bytes from user address udaddr into kernel address kaddr.
 * Small copies are done byte-at-a-time in inline asm, switching the MMU
 * PID between the user context (for loads) and the kernel context (for
 * stores); larger copies go through bigcopyin().  Returns 0 on success
 * or EFAULT if the user access faults (via the setfault() handler).
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256)
		return (bigcopyin(udaddr, kaddr, len));

	/* Arm the on-fault handler; a fault in the asm below unwinds here. */
	if (setfault(&env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved PID, %2 = byte temp,
	 * %3 = user context, %4 = udaddr, %5 = kaddr, %6 = len.
	 * CTR is preloaded with len+1 so the leading bdz executes the
	 * body exactly len times.
	 * NOTE(review): %4, %5 and %6 are input operands but are modified
	 * by the asm (addi/mtctr), and CTR is clobbered without being
	 * listed -- works with current register allocation, but the
	 * constraints deserve a second look.
	 */
	__asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %3; sync;"		/* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;"	/* Store kernel byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "b" (udaddr), "b" (kaddr), "b" (len));

	/* Disarm the on-fault handler before returning. */
	curpcb->pcb_onfault = 0;
	return 0;
}
451
452 static int
453 bigcopyin(const void *udaddr, void *kaddr, size_t len)
454 {
455 const char *up;
456 char *kp = kaddr;
457 struct lwp *l = curlwp;
458 struct proc *p;
459 int error;
460
461 if (!l) {
462 return EFAULT;
463 }
464
465 p = l->l_proc;
466
467 /*
468 * Stolen from physio():
469 */
470 PHOLD(l);
471 error = uvm_vslock(p->p_vmspace, __UNCONST(udaddr), len, VM_PROT_READ);
472 if (error) {
473 PRELE(l);
474 return EFAULT;
475 }
476 up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
477
478 memcpy(kp, up, len);
479 vunmaprange((vaddr_t)up, len);
480 uvm_vsunlock(p->p_vmspace, __UNCONST(udaddr), len);
481 PRELE(l);
482
483 return 0;
484 }
485
/*
 * Copy len bytes from kernel address kaddr to user address udaddr.
 * Mirror image of copyin(): small copies run byte-at-a-time in inline
 * asm, switching PID between the kernel context (loads) and the user
 * context (stores); larger copies use bigcopyout().  Returns 0 on
 * success or EFAULT if the user access faults.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256)
		return (bigcopyout(kaddr, udaddr, len));

	/* Arm the on-fault handler; a fault in the asm below unwinds here. */
	if (setfault(&env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved PID, %2 = byte temp,
	 * %3 = user context, %4 = udaddr, %5 = kaddr, %6 = len.
	 * CTR is preloaded with len+1 so the leading bdz executes the
	 * body exactly len times.
	 * NOTE(review): as in copyin(), input operands %4/%5/%6 are
	 * modified and CTR is clobbered without being declared.
	 */
	__asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;"		/* Load user ctx */
		"stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;"	/* Store user byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "b" (udaddr), "b" (kaddr), "b" (len));

	/* Disarm the on-fault handler before returning. */
	curpcb->pcb_onfault = 0;
	return 0;
}
532
533 static int
534 bigcopyout(const void *kaddr, void *udaddr, size_t len)
535 {
536 char *up;
537 const char *kp = (const char *)kaddr;
538 struct lwp *l = curlwp;
539 struct proc *p;
540 int error;
541
542 if (!l) {
543 return EFAULT;
544 }
545
546 p = l->l_proc;
547
548 /*
549 * Stolen from physio():
550 */
551 PHOLD(l);
552 error = uvm_vslock(p->p_vmspace, udaddr, len, VM_PROT_WRITE);
553 if (error) {
554 PRELE(l);
555 return EFAULT;
556 }
557 up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
558 VM_PROT_READ | VM_PROT_WRITE);
559
560 memcpy(up, kp, len);
561 vunmaprange((vaddr_t)up, len);
562 uvm_vsunlock(p->p_vmspace, udaddr, len);
563 PRELE(l);
564
565 return 0;
566 }
567
568 /*
569 * kcopy(const void *src, void *dst, size_t len);
570 *
571 * Copy len bytes from src to dst, aborting if we encounter a fatal
572 * page fault.
573 *
574 * kcopy() _must_ save and restore the old fault handler since it is
575 * called by uiomove(), which may be in the path of servicing a non-fatal
576 * page fault.
577 */
578 int
579 kcopy(const void *src, void *dst, size_t len)
580 {
581 struct faultbuf env, *oldfault;
582
583 oldfault = curpcb->pcb_onfault;
584 if (setfault(&env)) {
585 curpcb->pcb_onfault = oldfault;
586 return EFAULT;
587 }
588
589 memcpy(dst, src, len);
590
591 curpcb->pcb_onfault = oldfault;
592 return 0;
593 }
594
/*
 * Probe addr for readability at the given access size, discarding the
 * value read.  Returns non-zero if the access faults (see
 * badaddr_read()).
 */
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}
601
602 int
603 badaddr_read(void *addr, size_t size, int *rptr)
604 {
605 struct faultbuf env;
606 int x;
607
608 /* Get rid of any stale machine checks that have been waiting. */
609 __asm volatile ("sync; isync");
610
611 if (setfault(&env)) {
612 curpcb->pcb_onfault = 0;
613 __asm volatile ("sync");
614 return 1;
615 }
616
617 __asm volatile ("sync");
618
619 switch (size) {
620 case 1:
621 x = *(volatile int8_t *)addr;
622 break;
623 case 2:
624 x = *(volatile int16_t *)addr;
625 break;
626 case 4:
627 x = *(volatile int32_t *)addr;
628 break;
629 default:
630 panic("badaddr: invalid size (%d)", size);
631 }
632
633 /* Make sure we took the machine check, if we caused one. */
634 __asm volatile ("sync; isync");
635
636 curpcb->pcb_onfault = 0;
637 __asm volatile ("sync"); /* To be sure. */
638
639 /* Use the value to avoid reorder. */
640 if (rptr)
641 *rptr = x;
642
643 return 0;
644 }
645
646 /*
647 * For now, this only deals with the particular unaligned access case
648 * that gcc tends to generate. Eventually it should handle all of the
649 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
650 */
651
/*
 * Attempt to fix up an unaligned access.  No emulation is implemented
 * yet, so every alignment fault is reported back to the caller as
 * unhandled (non-zero), which makes trap() deliver SIGBUS.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

	return (-1);
}
658
659 /*
660 * Start a new LWP
661 */
662 void
663 startlwp(arg)
664 void *arg;
665 {
666 int err;
667 ucontext_t *uc = arg;
668 struct lwp *l = curlwp;
669
670 err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
671 #if DIAGNOSTIC
672 if (err) {
673 printf("Error %d from cpu_setmcontext.", err);
674 }
675 #endif
676 pool_put(&lwp_uc_pool, uc);
677
678 /* Invoke MI userret code */
679 mi_userret(l);
680
681 curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
682 }
683