trap.c revision 1.10 1 /* $NetBSD: trap.c,v 1.10 2003/01/18 06:23:31 thorpej Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include "opt_altivec.h"
70 #include "opt_ddb.h"
71 #include "opt_ktrace.h"
72 #include "opt_systrace.h"
73 #include "opt_syscall_debug.h"
74
75 #include <sys/param.h>
76 #include <sys/proc.h>
77 #include <sys/reboot.h>
78 #include <sys/syscall.h>
79 #include <sys/systm.h>
80 #include <sys/user.h>
81 #ifdef KTRACE
82 #include <sys/ktrace.h>
83 #endif
84 #include <sys/pool.h>
85 #include <sys/sa.h>
86 #include <sys/savar.h>
87 #ifdef SYSTRACE
88 #include <sys/systrace.h>
89 #endif
90
91 #include <uvm/uvm_extern.h>
92
93 #include <dev/cons.h>
94
95 #include <machine/cpu.h>
96 #include <machine/db_machdep.h>
97 #include <machine/fpu.h>
98 #include <machine/frame.h>
99 #include <machine/pcb.h>
100 #include <machine/psl.h>
101 #include <machine/trap.h>
102
103 #include <powerpc/spr.h>
104 #include <powerpc/ibm4xx/pmap.h>
105 #include <powerpc/ibm4xx/tlb.h>
106 #include <powerpc/fpu/fpu_extern.h>
107
108 /* These definitions should probably be somewhere else XXX */
109 #define FIRSTARG 3 /* first argument is in reg 3 */
110 #define NARGREG 8 /* 8 args are in registers */
111 #define MOREARGS(sp) ((caddr_t)((int)(sp) + 8)) /* more args go here */
112
113 #ifndef MULTIPROCESSOR
114 volatile int astpending;
115 volatile int want_resched;
116 #endif
117
118 static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));
119
120 void trap __P((struct trapframe *)); /* Called from locore / trap_subr */
121 int setfault __P((faultbuf)); /* defined in locore.S */
122 /* Why are these not defined in a header? */
123 int badaddr __P((void *, size_t));
124 int badaddr_read __P((void *, size_t, int *));
125 int ctx_setup __P((int, int));
126
127 #ifdef DEBUG
128 #define TDB_ALL 0x1
129 int trapdebug = /* TDB_ALL */ 0;
130 #define DBPRINTF(x, y) if (trapdebug & (x)) printf y
131 #else
132 #define DBPRINTF(x, y)
133 #endif
134
/*
 * trap:
 *
 *	Handle a processor trap or exception.  Called from locore /
 *	trap_subr with a fully built trap frame.  EXC_USER is or'd into
 *	the exception type when the fault came from user mode (PSL_PR
 *	set in SRR1).  After servicing a user-mode trap, performs the
 *	usual return-to-user work: pending signals, the per-process
 *	userret hook, scheduler-activation upcalls, priority update.
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	int type = frame->exc;
	int ftype, rv;

	KASSERT(l == 0 || (l->l_stat == LSONPROC));

	/* Mark faults taken in user mode. */
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	/* Default fault type; overridden to WRITE for store faults below. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %x from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;
			/*
			 * NOTE(review): a single mfspr writes only %0 (rv);
			 * srr2 and srr3 are declared as outputs but never
			 * written by the asm, so the printf below shows
			 * garbage for them -- TODO confirm and fix.
			 */
			__asm __volatile("mfspr %0,0x3f0" : "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2, srr3);
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
		/* FALLTHROUGH */
	case EXC_TRC|EXC_USER:
		KERNEL_PROC_LOCK(l);
		frame->srr1 &= ~PSL_SE;		/* clear single-step enable */
		trapsignal(l, SIGTRAP, EXC_TRC);
		KERNEL_PROC_UNLOCK(l);
		break;

	/* If we could not find and install appropriate TLB entry, fall through */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		{
			struct vm_map *map;
			vaddr_t va;
			faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dear;
			/*
			 * Faults taken while running with the kernel PID go
			 * to kernel_map; otherwise it is a copyin/copyout
			 * style access to the current process's map.
			 */
			if (frame->pid == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
			}

			/* Store or zone fault means a write access. */
			if (frame->esr & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL, ("trap(EXC_DSI) at %x %s fault on %p esr %x\n",
			    frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", (void *)va, frame->esr));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (rv == 0)
				goto done;
			/*
			 * Unresolved kernel fault: if a fault handler is
			 * registered (setfault()), longjmp-style return to
			 * it with fixreg[3] = 1 (TRUE = fault occurred).
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				    19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		KERNEL_PROC_LOCK(l);

		/* Store or zone fault means a write access. */
		if (frame->esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL, ("trap(EXC_DSI|EXC_USER) at %x %s fault on %x %x\n",
		    frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", frame->dear, frame->esr));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
		rv = uvm_fault(&p->p_vmspace->vm_map,
		    trunc_page(frame->dear), 0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		if (rv == ENOMEM) {
			/* Out of swap: kill the process rather than loop. */
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			trapsignal(l, SIGKILL, EXC_DSI);
		} else {
			trapsignal(l, SIGSEGV, EXC_DSI);
		}
		KERNEL_PROC_UNLOCK(l);
		break;
	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		KERNEL_PROC_LOCK(l);
		/* Instruction fetch fault: needs execute permission. */
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL, ("trap(EXC_ISI|EXC_USER) at %x %s fault on %x tf %p\n",
		    frame->srr0, (ftype&VM_PROT_WRITE) ? "write" : "read", frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0), 0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		trapsignal(l, SIGSEGV, EXC_ISI);
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_AST|EXC_USER:
		astpending = 0;		/* we are about to do it */
		KERNEL_PROC_LOCK(l);
		uvmexp.softs++;
		/* Post deferred profiling ticks, if any. */
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (want_resched)
			preempt(0);
		KERNEL_PROC_UNLOCK(l);
		break;


	case EXC_ALI|EXC_USER:
		KERNEL_PROC_LOCK(l);
		/* Try to emulate the unaligned access; SIGBUS on failure. */
		if (fix_unaligned(l, frame) != 0)
			trapsignal(l, SIGBUS, EXC_ALI);
		else
			frame->srr0 += 4;	/* skip the emulated insn */
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps ++;
		if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
			/* First FP use: give the lwp a clean FP context. */
			memset(&l->l_addr->u_pcb.pcb_fpu, 0,
			    sizeof l->l_addr->u_pcb.pcb_fpu);
			l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		/* fpu_emulate() returns a signal number on failure, 0 on success. */
		if ((rv = fpu_emulate(frame,
		    (struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
			KERNEL_PROC_LOCK(l);
			trapsignal(l, rv, EXC_PGM);
			KERNEL_PROC_UNLOCK(l);
		}
		break;

	case EXC_MCHK:
		{
			faultbuf *fb;

			/*
			 * Machine check in the kernel: recoverable only if a
			 * fault handler is registered (e.g. badaddr_read()).
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				    19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;
	default:
 brain_damage:
		/* Unhandled trap: drop into DDB if available, else panic. */
		printf("trap type 0x%x at 0x%x\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Take pending signals. */
	{
		int sig;

		while ((sig = CURSIG(l)) != 0)
			postsig(sig);
	}

	/* Invoke per-process kernel-exit handling, if any */
	if (p->p_userret)
		(p->p_userret)(l, p->p_userret_arg);

	/* Invoke any pending upcalls */
	while (l->l_flag & L_SA_UPCALL)
		sa_upcall_userret(l);

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
	return;
}
353
/*
 * ctx_setup:
 *
 *	Choose the MMU context (PID) to install on exception exit.
 *	When returning to user mode (PSL_PR set in srr1), make sure the
 *	current process's pmap has a context allocated and use it; if
 *	single-stepping (PSL_SE), also arm the DBCR0 debug register.
 *	When returning to kernel mode with ctx == 0, substitute
 *	KERNEL_PID.  Returns the context number to use.
 */
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			/* No context yet -- allocate one. */
			ctx_alloc((struct pmap *)pm);
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;
			/*
			 * Set the Internal Debug and
			 * Instruction Completion bits of
			 * the DBCR0 register.
			 *
			 * XXX this is also used by jtag debuggers...
			 */
			__asm __volatile("mfspr %0,0x3f2;"
			    "or %0,%0,%1;"
			    "mtspr 0x3f2,%0;" :
			    "=&r" (dbreg) : "r" (mask));
		}
	}
	else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}
386
387 /*
388 * Used by copyin()/copyout()
389 */
390 extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
391 extern void vunmaprange __P((vaddr_t, vsize_t));
392 static int bigcopyin __P((const void *, void *, size_t ));
393 static int bigcopyout __P((const void *, void *, size_t ));
394
/*
 * copyin:
 *
 *	Copy len bytes from user space (udaddr) to kernel space (kaddr).
 *	Returns 0 on success, EFAULT on a fault.  Copies byte-by-byte,
 *	switching the PID register between the user context and the
 *	kernel PID around each load/store; buffers larger than 256
 *	bytes are handed to bigcopyin(), which maps the user range
 *	instead.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256) return (bigcopyin(udaddr, kaddr, len));

	/* Arm the on-fault handler; a fault lands back here with rv != 0. */
	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Byte loop in asm: CTR counts len+1 iterations; DR (0x20 in MSR)
	 * is cleared while switching PIDs.
	 *
	 * NOTE(review): the asm increments input operands %4, %5 and %6
	 * and loads %6 into CTR without declaring them as outputs or
	 * listing "ctr" as a clobber -- fragile under optimization;
	 * TODO confirm against gcc inline-asm constraint rules.
	 */
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
	    "mfmsr %0;"				/* Save MSR */
	    "li %1,0x20; "
	    "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
	    "mfpid %1;"				/* Save old PID */
	    "sync; isync;"

	    "1: bdz 2f;"			/* while len */
	    "mtpid %3; sync;"			/* Load user ctx */
	    "lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
	    "sync; isync;"
	    "mtpid %1;sync;"
	    "stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;"	/* Store kernel byte */
	    "sync; isync;"
	    "b 1b;"				/* repeat */

	    "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
	    "sync; isync;"
	    : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
	    : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
440
441 static int
442 bigcopyin(const void *udaddr, void *kaddr, size_t len)
443 {
444 const char *up;
445 char *kp = kaddr;
446 struct lwp *l = curlwp;
447 struct proc *p;
448 int error;
449
450 if (!l) {
451 return EFAULT;
452 }
453
454 p = l->l_proc;
455
456 /*
457 * Stolen from physio():
458 */
459 PHOLD(l);
460 error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
461 if (error) {
462 PRELE(l);
463 return EFAULT;
464 }
465 up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
466
467 memcpy(kp, up, len);
468 vunmaprange((vaddr_t)up, len);
469 uvm_vsunlock(p, (caddr_t)udaddr, len);
470 PRELE(l);
471
472 return 0;
473 }
474
/*
 * copyout:
 *
 *	Copy len bytes from kernel space (kaddr) to user space (udaddr).
 *	Returns 0 on success, EFAULT on a fault.  Mirror image of
 *	copyin(): byte-by-byte copy switching the PID register between
 *	the kernel PID and the user context; buffers larger than 256
 *	bytes are handed to bigcopyout().
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256) return (bigcopyout(kaddr, udaddr, len));

	/* Arm the on-fault handler; a fault lands back here with rv != 0. */
	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * NOTE(review): as in copyin(), the asm modifies input operands
	 * %4, %5, %6 and CTR without declaring them -- fragile; TODO
	 * confirm against gcc inline-asm constraint rules.
	 */
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
	    "mfmsr %0;"				/* Save MSR */
	    "li %1,0x20; "
	    "andc %1,%0,%1; mtmsr %1;"		/* Disable IMMU */
	    "mfpid %1;"				/* Save old PID */
	    "sync; isync;"

	    "1: bdz 2f;"			/* while len */
	    "mtpid %1;sync;"
	    "lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
	    "sync; isync;"
	    "mtpid %3; sync;"			/* Load user ctx */
	    "stb %2,0(%4); dcbf 0,%4; addi %4,%4,1;"	/* Store user byte */
	    "sync; isync;"
	    "b 1b;"				/* repeat */

	    "2: mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
	    "sync; isync;"
	    : "=&r" (msr), "=&r" (pid), "=&r" (tmp)
	    : "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
520
521 static int
522 bigcopyout(const void *kaddr, void *udaddr, size_t len)
523 {
524 char *up;
525 const char *kp = (char *)kaddr;
526 struct lwp *l = curlwp;
527 struct proc *p;
528 int error;
529
530 if (!l) {
531 return EFAULT;
532 }
533
534 p = l->l_proc;
535
536 /*
537 * Stolen from physio():
538 */
539 PHOLD(l);
540 error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
541 if (error) {
542 PRELE(l);
543 return EFAULT;
544 }
545 up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
546 VM_PROT_READ|VM_PROT_WRITE);
547
548 memcpy(up, kp, len);
549 vunmaprange((vaddr_t)up, len);
550 uvm_vsunlock(p, udaddr, len);
551 PRELE(l);
552
553 return 0;
554 }
555
556 /*
557 * kcopy(const void *src, void *dst, size_t len);
558 *
559 * Copy len bytes from src to dst, aborting if we encounter a fatal
560 * page fault.
561 *
562 * kcopy() _must_ save and restore the old fault handler since it is
563 * called by uiomove(), which may be in the path of servicing a non-fatal
564 * page fault.
565 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	/*
	 * Save the previous fault handler: kcopy() may run while another
	 * on-fault handler (e.g. uiomove()'s caller) is active.
	 */
	oldfault = curpcb->pcb_onfault;
	if (setfault(env)) {
		/* Faulted: restore the outer handler and report failure. */
		curpcb->pcb_onfault = oldfault;
		return EFAULT;
	}

	memcpy(dst, src, len);

	/* Success: restore the outer handler. */
	curpcb->pcb_onfault = oldfault;
	return 0;
}
582
/*
 * badaddr:
 *
 *	Probe whether a read of `size' bytes (1, 2 or 4) at `addr'
 *	faults.  Returns non-zero if the access faulted.  Wrapper
 *	around badaddr_read() that discards the value read.
 */
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}
589
590 int
591 badaddr_read(void *addr, size_t size, int *rptr)
592 {
593 faultbuf env;
594 int x;
595
596 /* Get rid of any stale machine checks that have been waiting. */
597 __asm __volatile ("sync; isync");
598
599 if (setfault(env)) {
600 curpcb->pcb_onfault = 0;
601 __asm __volatile ("sync");
602 return 1;
603 }
604
605 __asm __volatile ("sync");
606
607 switch (size) {
608 case 1:
609 x = *(volatile int8_t *)addr;
610 break;
611 case 2:
612 x = *(volatile int16_t *)addr;
613 break;
614 case 4:
615 x = *(volatile int32_t *)addr;
616 break;
617 default:
618 panic("badaddr: invalid size (%d)", size);
619 }
620
621 /* Make sure we took the machine check, if we caused one. */
622 __asm __volatile ("sync; isync");
623
624 curpcb->pcb_onfault = 0;
625 __asm __volatile ("sync"); /* To be sure. */
626
627 /* Use the value to avoid reorder. */
628 if (rptr)
629 *rptr = x;
630
631 return 0;
632 }
633
634 /*
635 * For now, this only deals with the particular unaligned access case
636 * that gcc tends to generate. Eventually it should handle all of the
637 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
638 */
639
/*
 * fix_unaligned:
 *
 *	Attempt to emulate the unaligned access that caused an EXC_ALI
 *	trap.  Not implemented yet: always reports failure, so the
 *	caller delivers SIGBUS.  (See the comment above: eventually
 *	this should cover the cases gcc generates and more.)
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{
	/* No emulation available -- tell the caller we failed. */
	return (-1);
}
646
647 /*
648 * Start a new LWP
649 */
650 void
651 startlwp(arg)
652 void *arg;
653 {
654 int err;
655 ucontext_t *uc = arg;
656 struct lwp *l = curlwp;
657
658 err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
659 #if DIAGNOSTIC
660 if (err) {
661 printf("Error %d from cpu_setmcontext.", err);
662 }
663 #endif
664 pool_put(&lwp_uc_pool, uc);
665
666 upcallret(l);
667 }
668
669 /*
670 * XXX This is a terrible name.
671 */
672 void
673 upcallret(l)
674 struct lwp *l;
675 {
676 int sig;
677
678 /* Take pending signals. */
679 while ((sig = CURSIG(l)) != 0)
680 postsig(sig);
681
682 /* Invoke per-process kernel-exit handling, if any */
683 if (l->l_proc->p_userret)
684 (l->l_proc->p_userret)(l, l->l_proc->p_userret_arg);
685
686 /* Invoke any pending upcalls */
687 while (l->l_flag & L_SA_UPCALL)
688 sa_upcall_userret(l);
689
690 curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
691 }
692