trap.c revision 1.2.6.9 1 /* $NetBSD: trap.c,v 1.2.6.9 2002/08/01 02:43:01 nathanw Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include "opt_altivec.h"
70 #include "opt_ddb.h"
71 #include "opt_ktrace.h"
72 #include "opt_systrace.h"
73 #include "opt_syscall_debug.h"
74
75 #include <sys/param.h>
76 #include <sys/proc.h>
77 #include <sys/reboot.h>
78 #include <sys/syscall.h>
79 #include <sys/systm.h>
80 #include <sys/user.h>
81 #ifdef KTRACE
82 #include <sys/ktrace.h>
83 #endif
84 #include <sys/pool.h>
85 #include <sys/sa.h>
86 #include <sys/savar.h>
87 #ifdef SYSTRACE
88 #include <sys/systrace.h>
89 #endif
90
91 #include <uvm/uvm_extern.h>
92
93 #include <dev/cons.h>
94
95 #include <machine/cpu.h>
96 #include <machine/db_machdep.h>
97 #include <machine/fpu.h>
98 #include <machine/frame.h>
99 #include <machine/pcb.h>
100 #include <machine/psl.h>
101 #include <machine/trap.h>
102
103 #include <powerpc/spr.h>
104 #include <powerpc/ibm4xx/pmap.h>
105 #include <powerpc/ibm4xx/tlb.h>
106 #include <powerpc/fpu/fpu_extern.h>
107
108 /* These definitions should probably be somewhere else XXX */
109 #define FIRSTARG 3 /* first argument is in reg 3 */
110 #define NARGREG 8 /* 8 args are in registers */
111 #define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
112
113 #ifndef MULTIPROCESSOR
114 volatile int astpending;
115 volatile int want_resched;
116 #endif
117
118 static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));
119
120 void trap __P((struct trapframe *)); /* Called from locore / trap_subr */
121 int setfault __P((faultbuf)); /* defined in locore.S */
122 /* Why are these not defined in a header? */
123 int badaddr __P((void *, size_t));
124 int badaddr_read __P((void *, size_t, int *));
125 int ctx_setup __P((int, int));
126
127 #ifdef DEBUG
128 #define TDB_ALL 0x1
129 int trapdebug = /* TDB_ALL */ 0;
130 #define DBPRINTF(x, y) if (trapdebug & (x)) printf y
131 #else
132 #define DBPRINTF(x, y)
133 #endif
134
135 void
136 trap(struct trapframe *frame)
137 {
138 struct lwp *l = curlwp;
139 struct proc *p = l ? l->l_proc : NULL;
140 int type = frame->exc;
141 int ftype, rv;
142
143 KASSERT(l == 0 || (l->l_stat == LSONPROC));
144
145 if (frame->srr1 & PSL_PR)
146 type |= EXC_USER;
147
148 ftype = VM_PROT_READ;
149
150 DBPRINTF(TDB_ALL, ("trap(%x) at %x from frame %p &frame %p\n",
151 type, frame->srr0, frame, &frame));
152
153 switch (type) {
154 case EXC_DEBUG|EXC_USER:
155 {
156 int srr2, srr3;
157 __asm __volatile("mfspr %0,0x3f0" : "=r" (rv), "=r" (srr2), "=r" (srr3) :);
158 printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2, srr3);
159 }
160 /*
161 * DEBUG intr -- probably single-step.
162 */
163 case EXC_TRC|EXC_USER:
164 KERNEL_PROC_LOCK(l);
165 frame->srr1 &= ~PSL_SE;
166 trapsignal(l, SIGTRAP, EXC_TRC);
167 KERNEL_PROC_UNLOCK(l);
168 break;
169
170 /* If we could not find and install appropriate TLB entry, fall through */
171
172 case EXC_DSI:
173 /* FALLTHROUGH */
174 case EXC_DTMISS:
175 {
176 struct vm_map *map;
177 vaddr_t va;
178 faultbuf *fb = NULL;
179
180 KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
181 va = frame->dear;
182 if (frame->pid == KERNEL_PID) {
183 map = kernel_map;
184 } else {
185 map = &p->p_vmspace->vm_map;
186 }
187
188 if (frame->esr & (ESR_DST|ESR_DIZ))
189 ftype = VM_PROT_WRITE;
190
191 DBPRINTF(TDB_ALL, ("trap(EXC_DSI) at %x %s fault on %p esr %x\n",
192 frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", (void *)va, frame->esr));
193 rv = uvm_fault(map, trunc_page(va), 0, ftype);
194 KERNEL_UNLOCK();
195 if (rv == 0)
196 goto done;
197 if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
198 frame->pid = KERNEL_PID;
199 frame->srr0 = (*fb)[0];
200 frame->srr1 |= PSL_IR; /* Re-enable IMMU */
201 frame->fixreg[1] = (*fb)[1];
202 frame->fixreg[2] = (*fb)[2];
203 frame->fixreg[3] = 1; /* Return TRUE */
204 frame->cr = (*fb)[3];
205 memcpy(&frame->fixreg[13], &(*fb)[4],
206 19 * sizeof(register_t));
207 goto done;
208 }
209 }
210 goto brain_damage;
211
212 case EXC_DSI|EXC_USER:
213 /* FALLTHROUGH */
214 case EXC_DTMISS|EXC_USER:
215 KERNEL_PROC_LOCK(l);
216
217 if (frame->esr & (ESR_DST|ESR_DIZ))
218 ftype = VM_PROT_WRITE;
219
220 DBPRINTF(TDB_ALL, ("trap(EXC_DSI|EXC_USER) at %x %s fault on %x %x\n",
221 frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", frame->dear, frame->esr));
222 KASSERT(l == curlwp && (l->l_stat == LSONPROC));
223 rv = uvm_fault(&p->p_vmspace->vm_map,
224 trunc_page(frame->dear), 0, ftype);
225 if (rv == 0) {
226 KERNEL_PROC_UNLOCK(l);
227 break;
228 }
229 if (rv == ENOMEM) {
230 printf("UVM: pid %d (%s) lid %d, uid %d killed: "
231 "out of swap\n",
232 p->p_pid, p->p_comm, l->l_lid,
233 p->p_cred && p->p_ucred ?
234 p->p_ucred->cr_uid : -1);
235 trapsignal(l, SIGKILL, EXC_DSI);
236 } else {
237 trapsignal(l, SIGSEGV, EXC_DSI);
238 }
239 KERNEL_PROC_UNLOCK(l);
240 break;
241 case EXC_ITMISS|EXC_USER:
242 case EXC_ISI|EXC_USER:
243 KERNEL_PROC_LOCK(l);
244 ftype = VM_PROT_READ | VM_PROT_EXECUTE;
245 DBPRINTF(TDB_ALL, ("trap(EXC_ISI|EXC_USER) at %x %s fault on %x tf %p\n",
246 frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", frame->srr0, frame));
247 rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0), 0, ftype);
248 if (rv == 0) {
249 KERNEL_PROC_UNLOCK(l);
250 break;
251 }
252 trapsignal(l, SIGSEGV, EXC_ISI);
253 KERNEL_PROC_UNLOCK(l);
254 break;
255
256 case EXC_SC|EXC_USER:
257 (*p->p_md.md_syscall)(frame);
258 break;
259
260 case EXC_AST|EXC_USER:
261 astpending = 0; /* we are about to do it */
262 KERNEL_PROC_LOCK(l);
263 uvmexp.softs++;
264 if (p->p_flag & P_OWEUPC) {
265 p->p_flag &= ~P_OWEUPC;
266 ADDUPROF(p);
267 }
268 /* Check whether we are being preempted. */
269 if (want_resched)
270 preempt(NULL);
271 KERNEL_PROC_UNLOCK(l);
272 break;
273
274
275 case EXC_ALI|EXC_USER:
276 KERNEL_PROC_LOCK(l);
277 if (fix_unaligned(l, frame) != 0)
278 trapsignal(l, SIGBUS, EXC_ALI);
279 else
280 frame->srr0 += 4;
281 KERNEL_PROC_UNLOCK(l);
282 break;
283
284 case EXC_PGM|EXC_USER:
285 /*
286 * Illegal insn:
287 *
288 * let's try to see if it's FPU and can be emulated.
289 */
290 uvmexp.traps ++;
291 if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
292 memset(&l->l_addr->u_pcb.pcb_fpu, 0,
293 sizeof l->l_addr->u_pcb.pcb_fpu);
294 l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
295 }
296
297 if ((rv = fpu_emulate(frame,
298 (struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
299 KERNEL_PROC_LOCK(l);
300 trapsignal(l, rv, EXC_PGM);
301 KERNEL_PROC_UNLOCK(l);
302 }
303 break;
304
305 case EXC_MCHK:
306 {
307 faultbuf *fb;
308
309 if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
310 frame->pid = KERNEL_PID;
311 frame->srr0 = (*fb)[0];
312 frame->srr1 |= PSL_IR; /* Re-enable IMMU */
313 frame->fixreg[1] = (*fb)[1];
314 frame->fixreg[2] = (*fb)[2];
315 frame->fixreg[3] = 1; /* Return TRUE */
316 frame->cr = (*fb)[3];
317 memcpy(&frame->fixreg[13], &(*fb)[4],
318 19 * sizeof(register_t));
319 goto done;
320 }
321 }
322 goto brain_damage;
323 default:
324 brain_damage:
325 printf("trap type 0x%x at 0x%x\n", type, frame->srr0);
326 #ifdef DDB
327 if (kdb_trap(type, frame))
328 goto done;
329 #endif
330 #ifdef TRAP_PANICWAIT
331 printf("Press a key to panic.\n");
332 cngetc();
333 #endif
334 panic("trap");
335 }
336
337 /* Take pending signals. */
338 {
339 int sig;
340
341 while ((sig = CURSIG(l)) != 0)
342 postsig(sig);
343 }
344
345 /* Invoke per-process kernel-exit handling, if any */
346 if (p->p_userret)
347 (p->p_userret)(l, p->p_userret_arg);
348
349 /* Invoke any pending upcalls */
350 if (l->l_flag & L_SA_UPCALL)
351 sa_upcall_userret(l);
352
353 curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
354 done:
355 }
356
357 int
358 ctx_setup(int ctx, int srr1)
359 {
360 volatile struct pmap *pm;
361
362 /* Update PID if we're returning to user mode. */
363 if (srr1 & PSL_PR) {
364 pm = curproc->p_vmspace->vm_map.pmap;
365 if (!pm->pm_ctx) {
366 ctx_alloc((struct pmap *)pm);
367 }
368 ctx = pm->pm_ctx;
369 if (srr1 & PSL_SE) {
370 int dbreg, mask = 0x48000000;
371 /*
372 * Set the Internal Debug and
373 * Instruction Completion bits of
374 * the DBCR0 register.
375 *
376 * XXX this is also used by jtag debuggers...
377 */
378 __asm __volatile("mfspr %0,0x3f2;"
379 "or %0,%0,%1;"
380 "mtspr 0x3f2,%0;" :
381 "=&r" (dbreg) : "r" (mask));
382 }
383 }
384 else if (!ctx) {
385 ctx = KERNEL_PID;
386 }
387 return (ctx);
388 }
389
390 /*
391 * Used by copyin()/copyout()
392 */
393 extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
394 extern void vunmaprange __P((vaddr_t, vsize_t));
395 static int bigcopyin __P((const void *, void *, size_t ));
396 static int bigcopyout __P((const void *, void *, size_t ));
397
/*
 * copyin(udaddr, kaddr, len):
 *
 *	Copy len bytes from user address udaddr into kernel buffer kaddr.
 *	Returns 0 on success, EFAULT if the user range faults (caught via
 *	the setfault()/pcb_onfault mechanism).
 *
 *	Small copies are done byte by byte in inline assembly, switching
 *	the MMU PID between the user context and the kernel context for
 *	each byte; copies larger than 256 bytes go through bigcopyin(),
 *	which maps the user range into kernel VA instead.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256) return (bigcopyin(udaddr, kaddr, len));

	/* Arrange for the EFAULT return if the copy below faults. */
	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved kernel PID (also scratch),
	 * %2 = byte in flight, %3 = user context, %4 = udaddr,
	 * %5 = kaddr, %6 = len (CTR counts len+1 iterations, bdz-first).
	 */
	asm volatile("addi %6,%6,1; mtctr %6;" /* Set up counter */
		"mfmsr %0;" /* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;" /* Disable IMMU */
		"mfpid %1;" /* Save old PID */
		"sync; isync;"

		"1: bdz 2f;" /* while len */
		"mtpid %3; sync;" /* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;" /* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;" /* Store kernel byte */
		"sync; isync;"
		"b 1b;" /* repeat */

		"2: mtpid %1; mtmsr %0;" /* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
443
444 static int
445 bigcopyin(const void *udaddr, void *kaddr, size_t len)
446 {
447 const char *up;
448 char *kp = kaddr;
449 struct lwp *l = curlwp;
450 struct proc *p;
451 int error;
452
453 if (!l) {
454 return EFAULT;
455 }
456
457 p = l->l_proc;
458
459 /*
460 * Stolen from physio():
461 */
462 PHOLD(l);
463 error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
464 if (error) {
465 PRELE(l);
466 return EFAULT;
467 }
468 up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
469
470 memcpy(kp, up, len);
471 vunmaprange((vaddr_t)up, len);
472 uvm_vsunlock(p, (caddr_t)udaddr, len);
473 PRELE(l);
474
475 return 0;
476 }
477
/*
 * copyout(kaddr, udaddr, len):
 *
 *	Copy len bytes from kernel buffer kaddr to user address udaddr.
 *	Returns 0 on success, EFAULT if the user range faults (caught via
 *	the setfault()/pcb_onfault mechanism).
 *
 *	Mirror image of copyin(): small copies go byte by byte through
 *	inline assembly that switches the MMU PID between kernel and user
 *	contexts; copies larger than 256 bytes use bigcopyout().
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256) return (bigcopyout(kaddr, udaddr, len));

	/* Arrange for the EFAULT return if the copy below faults. */
	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved kernel PID (also scratch),
	 * %2 = byte in flight, %3 = user context, %4 = udaddr,
	 * %5 = kaddr, %6 = len (CTR counts len+1 iterations, bdz-first).
	 */
	asm volatile("addi %6,%6,1; mtctr %6;" /* Set up counter */
		"mfmsr %0;" /* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;" /* Disable IMMU */
		"mfpid %1;" /* Save old PID */
		"sync; isync;"

		"1: bdz 2f;" /* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;" /* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;" /* Load user ctx */
		"stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;" /* Store user byte */
		"sync; isync;"
		"b 1b;" /* repeat */

		"2: mtpid %1; mtmsr %0;" /* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
523
524 static int
525 bigcopyout(const void *kaddr, void *udaddr, size_t len)
526 {
527 char *up;
528 const char *kp = (char *)kaddr;
529 struct lwp *l = curlwp;
530 struct proc *p;
531 int error;
532
533 if (!l) {
534 return EFAULT;
535 }
536
537 p = l->l_proc;
538
539 /*
540 * Stolen from physio():
541 */
542 PHOLD(l);
543 error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
544 if (error) {
545 PRELE(l);
546 return EFAULT;
547 }
548 up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
549 VM_PROT_READ|VM_PROT_WRITE);
550
551 memcpy(up, kp, len);
552 vunmaprange((vaddr_t)up, len);
553 uvm_vsunlock(p, udaddr, len);
554 PRELE(l);
555
556 return 0;
557 }
558
559 /*
560 * kcopy(const void *src, void *dst, size_t len);
561 *
562 * Copy len bytes from src to dst, aborting if we encounter a fatal
563 * page fault.
564 *
565 * kcopy() _must_ save and restore the old fault handler since it is
566 * called by uiomove(), which may be in the path of servicing a non-fatal
567 * page fault.
568 */
569 int
570 kcopy(const void *src, void *dst, size_t len)
571 {
572 faultbuf env, *oldfault;
573
574 oldfault = curpcb->pcb_onfault;
575 if (setfault(env)) {
576 curpcb->pcb_onfault = oldfault;
577 return EFAULT;
578 }
579
580 memcpy(dst, src, len);
581
582 curpcb->pcb_onfault = oldfault;
583 return 0;
584 }
585
/*
 * badaddr:
 *
 *	Probe addr for a machine check by reading size bytes from it,
 *	discarding the value.  Returns non-zero if the access faulted.
 */
int
badaddr(void *addr, size_t size)
{
	/* Plain probe: no interest in the value read. */
	return (badaddr_read(addr, size, NULL));
}
592
593 int
594 badaddr_read(void *addr, size_t size, int *rptr)
595 {
596 faultbuf env;
597 int x;
598
599 /* Get rid of any stale machine checks that have been waiting. */
600 __asm __volatile ("sync; isync");
601
602 if (setfault(env)) {
603 curpcb->pcb_onfault = 0;
604 __asm __volatile ("sync");
605 return 1;
606 }
607
608 __asm __volatile ("sync");
609
610 switch (size) {
611 case 1:
612 x = *(volatile int8_t *)addr;
613 break;
614 case 2:
615 x = *(volatile int16_t *)addr;
616 break;
617 case 4:
618 x = *(volatile int32_t *)addr;
619 break;
620 default:
621 panic("badaddr: invalid size (%d)", size);
622 }
623
624 /* Make sure we took the machine check, if we caused one. */
625 __asm __volatile ("sync; isync");
626
627 curpcb->pcb_onfault = 0;
628 __asm __volatile ("sync"); /* To be sure. */
629
630 /* Use the value to avoid reorder. */
631 if (rptr)
632 *rptr = x;
633
634 return 0;
635 }
636
/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 *
 * No emulation is implemented yet, so every alignment fault is
 * reported as unfixable (-1), which makes trap() deliver SIGBUS.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

	return (-1);
}
649
650 /*
651 * Start a new LWP
652 */
653 void
654 startlwp(arg)
655 void *arg;
656 {
657 int err;
658 ucontext_t *uc = arg;
659 struct lwp *l = curlwp;
660
661 err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
662 #if DIAGNOSTIC
663 if (err) {
664 printf("Error %d from cpu_setmcontext.", err);
665 }
666 #endif
667 pool_put(&lwp_uc_pool, uc);
668
669 upcallret(l);
670 }
671
672 /*
673 * XXX This is a terrible name.
674 */
675 void
676 upcallret(arg)
677 void *arg;
678 {
679 struct lwp *l = curlwp;
680 int sig;
681
682 /* Take pending signals. */
683 while ((sig = CURSIG(l)) != 0)
684 postsig(sig);
685
686 /* If our process is on the way out, die. */
687 if (l->l_proc->p_flag & P_WEXIT)
688 lwp_exit(l);
689
690 /* Invoke any pending upcalls */
691 if (l->l_flag & L_SA_UPCALL)
692 sa_upcall_userret(l);
693
694 curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
695 }
696