/* $NetBSD: kern_lwp.c,v 1.45 2006/10/11 04:51:06 thorpej Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.45 2006/10/11 04:51:06 thorpej Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

POOL_INIT(lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

/* Specificdata domain used for LWP-specific data. */
static specificdata_domain_t lwp_specificdata_domain;

/* List of all LWPs in the system. */
struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

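/*
 * lwpinit --
 *	Initialize the LWP subsystem: create the specificdata domain
 *	used to hang per-LWP data off struct lwp.
 */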
void
lwpinit(void)
{

    lwp_specificdata_domain = specificdata_domain_create();
    KASSERT(lwp_specificdata_domain != NULL);
}

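/*
 * _lwp_create(2) system call: create a new LWP in the calling process,
 * initialized from the user-supplied ucontext.  Unless LWP_SUSPENDED is
 * given, the new LWP is made runnable immediately.
 */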
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_create_args /* {
        syscallarg(const ucontext_t *) ucp;
        syscallarg(u_long) flags;
        syscallarg(lwpid_t *) new_lwp;
    } */ *uap = v;
    struct proc *p = l->l_proc;
    struct lwp *l2;
    vaddr_t uaddr;
    boolean_t inmem;
    ucontext_t *newuc;
    int s, error;

    if (p->p_flag & P_SA)
        return EINVAL;

    newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

    error = copyin(SCARG(uap, ucp), newuc,
        l->l_proc->p_emul->e_sa->sae_ucsize);
    if (error) {
        pool_put(&lwp_uc_pool, newuc);
        return (error);
    }

    /* XXX check against resource limits */

    inmem = uvm_uarea_alloc(&uaddr);
    if (__predict_false(uaddr == 0)) {
        pool_put(&lwp_uc_pool, newuc);
        return (ENOMEM);
    }

    /* XXX flags:
     * __LWP_ASLWP is probably needed for Solaris compat.
     */

    newlwp(l, p, uaddr, inmem,
        SCARG(uap, flags) & LWP_DETACHED,
        NULL, 0, startlwp, newuc, &l2);

    if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
        SCHED_LOCK(s);
        l2->l_stat = LSRUN;
        setrunqueue(l2);
        p->p_nrlwps++;
        SCHED_UNLOCK(s);
    } else {
        l2->l_stat = LSSUSPENDED;
    }

    error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
        sizeof(l2->l_lid));
    if (error) {
        /* XXX We should destroy the LWP. */
        return (error);
    }

    return (0);
}


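/*
 * _lwp_exit(2) system call: terminate the calling LWP.
 */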
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

    lwp_exit(l);
    /* NOTREACHED */
    return (0);
}


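/*
 * _lwp_self(2) system call: return the LWP ID of the calling LWP.
 */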
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

    *retval = l->l_lid;

    return (0);
}


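/*
 * _lwp_getprivate(2) system call: return the calling LWP's private
 * data pointer.
 */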
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

    *retval = (uintptr_t) l->l_private;

    return (0);
}


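/*
 * _lwp_setprivate(2) system call: set the calling LWP's private
 * data pointer.
 */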
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_setprivate_args /* {
        syscallarg(void *) ptr;
    } */ *uap = v;

    l->l_private = SCARG(uap, ptr);

    return (0);
}


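/*
 * _lwp_suspend(2) system call: suspend the target LWP in the calling
 * process.  Returns EDEADLK if the caller tries to suspend itself while
 * every other LWP in the process is already suspended.
 */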
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_suspend_args /* {
        syscallarg(lwpid_t) target;
    } */ *uap = v;
    int target_lid;
    struct proc *p = l->l_proc;
    struct lwp *t;
    struct lwp *t2;

    if (p->p_flag & P_SA)
        return EINVAL;

    target_lid = SCARG(uap, target);

    LIST_FOREACH(t, &p->p_lwps, l_sibling)
        if (t->l_lid == target_lid)
            break;

    if (t == NULL)
        return (ESRCH);

    if (t == l) {
        /*
         * Check for deadlock, which is only possible
         * when we're suspending ourselves.
         */
        LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
            if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
                break;
        }

        if (t2 == NULL) /* All other LWPs are suspended */
            return (EDEADLK);
    }

    return lwp_suspend(l, t);
}

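/*
 * Suspend the LWP "t" on behalf of the caller "l".  Self-suspension
 * switches away immediately; other LWPs are moved to LSSUSPENDED
 * according to their current state.
 */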
inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
    struct proc *p = t->l_proc;
    int s;

    if (t == l) {
        SCHED_LOCK(s);
        KASSERT(l->l_stat == LSONPROC);
        l->l_stat = LSSUSPENDED;
        p->p_nrlwps--;
        /* XXX NJWLWP check if this makes sense here: */
        p->p_stats->p_ru.ru_nvcsw++;
        mi_switch(l, NULL);
        SCHED_ASSERT_UNLOCKED();
        splx(s);
    } else {
        switch (t->l_stat) {
        case LSSUSPENDED:
            return (0); /* _lwp_suspend() is idempotent */
        case LSRUN:
            SCHED_LOCK(s);
            remrunqueue(t);
            t->l_stat = LSSUSPENDED;
            p->p_nrlwps--;
            SCHED_UNLOCK(s);
            break;
        case LSSLEEP:
            t->l_stat = LSSUSPENDED;
            break;
        case LSIDL:
        case LSZOMB:
            return (EINTR); /* It's what Solaris does..... */
        case LSSTOP:
            panic("_lwp_suspend: Stopped LWP in running process!");
            break;
        case LSONPROC:
            /* XXX multiprocessor LWPs? Implement me! */
            return (EINVAL);
        }
    }

    return (0);
}


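/*
 * _lwp_continue(2) system call: resume a suspended LWP in the calling
 * process.
 */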
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_continue_args /* {
        syscallarg(lwpid_t) target;
    } */ *uap = v;
    int s, target_lid;
    struct proc *p = l->l_proc;
    struct lwp *t;

    if (p->p_flag & P_SA)
        return EINVAL;

    target_lid = SCARG(uap, target);

    LIST_FOREACH(t, &p->p_lwps, l_sibling)
        if (t->l_lid == target_lid)
            break;

    if (t == NULL)
        return (ESRCH);

    SCHED_LOCK(s);
    lwp_continue(t);
    SCHED_UNLOCK(s);

    return (0);
}

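/*
 * Make a suspended LWP eligible to run again: put it back on the run
 * queue if it was runnable before suspension, or return it to its sleep
 * if it was sleeping.  Called with the scheduler lock held.
 */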
void
lwp_continue(struct lwp *l)
{

    DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
        l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
        l->l_wchan));

    if (l->l_stat != LSSUSPENDED)
        return;

    if (l->l_wchan == 0) {
        /* LWP was runnable before being suspended. */
        setrunnable(l);
    } else {
        /* LWP was sleeping before being suspended. */
        l->l_stat = LSSLEEP;
    }
}

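/*
 * _lwp_wakeup(2) system call: interrupt a target LWP's sleep.  The sleep
 * must be interruptible; the LWP is marked cancelled so that ltsleep()
 * returns early.
 */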
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_wakeup_args /* {
        syscallarg(lwpid_t) target;
    } */ *uap = v;
    lwpid_t target_lid;
    struct lwp *t;
    struct proc *p;
    int error;
    int s;

    p = l->l_proc;
    target_lid = SCARG(uap, target);

    SCHED_LOCK(s);

    LIST_FOREACH(t, &p->p_lwps, l_sibling)
        if (t->l_lid == target_lid)
            break;

    if (t == NULL) {
        error = ESRCH;
        goto exit;
    }

    if (t->l_stat != LSSLEEP) {
        error = ENODEV;
        goto exit;
    }

    if ((t->l_flag & L_SINTR) == 0) {
        error = EBUSY;
        goto exit;
    }
    /*
     * Tell ltsleep to wakeup.
     */
    t->l_flag |= L_CANCELLED;

    setrunnable(t);
    error = 0;
 exit:
    SCHED_UNLOCK(s);

    return error;
}

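/*
 * _lwp_wait(2) system call: wait for an LWP in the calling process to
 * terminate, and optionally copy out the LID of the departed LWP.
 */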
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
    struct sys__lwp_wait_args /* {
        syscallarg(lwpid_t) wait_for;
        syscallarg(lwpid_t *) departed;
    } */ *uap = v;
    int error;
    lwpid_t dep;

    error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
    if (error)
        return (error);

    if (SCARG(uap, departed)) {
        error = copyout(&dep, SCARG(uap, departed),
            sizeof(dep));
        if (error)
            return (error);
    }

    return (0);
}


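/*
 * Wait for a non-detached LWP in the same process to become a zombie and
 * reap it.  If "lid" is zero, wait for any LWP; otherwise wait for that
 * specific LWP.  The departed LWP's LID is returned via "departed".
 */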
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
    struct proc *p = l->l_proc;
    struct lwp *l2, *l3;
    int nfound, error, wpri;
    static const char waitstr1[] = "lwpwait";
    static const char waitstr2[] = "lwpwait2";

    DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
        p->p_pid, l->l_lid, lid));

    if (lid == l->l_lid)
        return (EDEADLK); /* Waiting for ourselves makes no sense. */

    wpri = PWAIT |
        ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
    nfound = 0;
    LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
        if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
            ((lid != 0) && (lid != l2->l_lid)))
            continue;

        nfound++;
        if (l2->l_stat == LSZOMB) {
            if (departed)
                *departed = l2->l_lid;

            simple_lock(&p->p_lock);
            LIST_REMOVE(l2, l_sibling);
            p->p_nlwps--;
            p->p_nzlwps--;
            simple_unlock(&p->p_lock);
            /* XXX decrement limits */

            pool_put(&lwp_pool, l2);

            return (0);
        } else if (l2->l_stat == LSSLEEP ||
            l2->l_stat == LSSUSPENDED) {
            /*
             * Deadlock checks.
             * 1. If all other LWPs are waiting for exits
             *    or suspended, we would deadlock.
             */

            LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
                if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
                    !(l3->l_stat == LSSLEEP &&
                    l3->l_wchan == (caddr_t) &p->p_nlwps))
                    break;
            }
            if (l3 == NULL) /* Everyone else is waiting. */
                return (EDEADLK);

            /*
             * XXX we'd like to check for a cycle of waiting
             * LWPs (specific LID waits, not any-LWP waits)
             * and detect that sort of deadlock, but we don't
             * have a good place to store the lwp that is
             * being waited for.  wchan is already filled with
             * &p->p_nlwps, and putting the lwp address in
             * there for deadlock tracing would require
             * exiting LWPs to call wakeup on both their
             * own address and &p->p_nlwps, to get threads
             * sleeping on any LWP exiting.
             *
             * Revisit later.  Maybe another auxiliary
             * storage location associated with sleeping
             * is in order.
             */
        }
    }

    if (nfound == 0)
        return (ESRCH);

    if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
        (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
        return (error);

    goto loop;
}


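/*
 * Create a new LWP in process "p2", using the u-area at "uaddr" and
 * copying startup state from "l1".  The new LWP is left in state LSIDL;
 * the caller is responsible for making it runnable.
 */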
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
    struct lwp *l2;
    int s;

    l2 = pool_get(&lwp_pool, PR_WAITOK);

    l2->l_stat = LSIDL;
    l2->l_forw = l2->l_back = NULL;
    l2->l_proc = p2;

    lwp_initspecific(l2);

    memset(&l2->l_startzero, 0,
        (unsigned) ((caddr_t)&l2->l_endzero -
            (caddr_t)&l2->l_startzero));
    memcpy(&l2->l_startcopy, &l1->l_startcopy,
        (unsigned) ((caddr_t)&l2->l_endcopy -
            (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
    /*
     * In the single-processor case, all processes will always run
     * on the same CPU.  So, initialize the child's CPU to the parent's
     * now.  In the multiprocessor case, the child's CPU will be
     * initialized in the low-level context switch code when the
     * process runs.
     */
    KASSERT(l1->l_cpu != NULL);
    l2->l_cpu = l1->l_cpu;
#else
    /*
     * zero child's CPU pointer so we don't get trash.
     */
    l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

    l2->l_flag = inmem ? L_INMEM : 0;
    l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

    lwp_update_creds(l2);
    callout_init(&l2->l_tsleep_ch);

    if (rnewlwpp != NULL)
        *rnewlwpp = l2;

    l2->l_addr = UAREA_TO_USER(uaddr);
    uvm_lwp_fork(l1, l2, stack, stacksize, func,
        (arg != NULL) ? arg : l2);

    simple_lock(&p2->p_lock);
    l2->l_lid = ++p2->p_nlwpid;
    LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
    p2->p_nlwps++;
    simple_unlock(&p2->p_lock);

    /* XXX should be locked differently... */
    s = proclist_lock_write();
    LIST_INSERT_HEAD(&alllwp, l2, l_list);
    proclist_unlock_write(s);

    if (p2->p_emul->e_lwp_fork)
        (*p2->p_emul->e_lwp_fork)(l1, l2);

    return (0);
}


/*
 * Exit the calling LWP.  This will call cpu_exit, which will call
 * cpu_switch, so this can only be used meaningfully if you're willing
 * to switch away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
    struct proc *p = l->l_proc;
    int s;

    DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
    DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
        p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

    if (p->p_emul->e_lwp_exit)
        (*p->p_emul->e_lwp_exit)(l);

    /*
     * If we are the last live LWP in a process, we need to exit
     * the entire process (if that's not already going on).  We do
     * so with an exit status of zero, because it's a "controlled"
     * exit, and because that's what Solaris does.
     *
     * Note: the last LWP's specificdata will be deleted here.
     */
    if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
        DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
            p->p_pid, l->l_lid));
        exit1(l, 0);
        /* NOTREACHED */
    }

    /* Delete the specificdata while it's still safe to sleep. */
    specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);

    s = proclist_lock_write();
    LIST_REMOVE(l, l_list);
    proclist_unlock_write(s);

    /*
     * Release our cached credentials, and collate accounting flags.
     */
    kauth_cred_free(l->l_cred);
    simple_lock(&p->p_lock);
    p->p_acflag |= l->l_acflag;
    simple_unlock(&p->p_lock);

    /* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
    cpu_lwp_free(l, 0);
#endif

    pmap_deactivate(l);

    if (l->l_flag & L_DETACHED) {
        simple_lock(&p->p_lock);
        LIST_REMOVE(l, l_sibling);
        p->p_nlwps--;
        simple_unlock(&p->p_lock);

        curlwp = NULL;
        l->l_proc = NULL;
    }

    SCHED_LOCK(s);
    p->p_nrlwps--;
    l->l_stat = LSDEAD;
    SCHED_UNLOCK(s);

    /* This LWP no longer needs to hold the kernel lock. */
    KERNEL_PROC_UNLOCK(l);

    /* cpu_exit() will not return */
    cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
    struct proc *p;

    KERNEL_LOCK(LK_EXCLUSIVE);
    /*
     * Free the VM resources we're still holding on to.
     */
    uvm_lwp_exit(l);

    if (l->l_flag & L_DETACHED) {
        /* Nobody waits for detached LWPs. */
        pool_put(&lwp_pool, l);
        KERNEL_UNLOCK();
    } else {
        l->l_stat = LSZOMB;
        p = l->l_proc;
        p->p_nzlwps++;
        KERNEL_UNLOCK();
        wakeup(&p->p_nlwps);
    }
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
    struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
    struct lwp *signalled;

    /* Trivial case: only one LWP */
    if (p->p_nlwps == 1)
        return (LIST_FIRST(&p->p_lwps));

    switch (p->p_stat) {
    case SSTOP:
    case SACTIVE:
        /* Pick the most live LWP */
        onproc = running = sleeping = stopped = suspended = NULL;
        signalled = NULL;
        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
            if (l->l_lid == p->p_sigctx.ps_lwp)
                signalled = l;
            switch (l->l_stat) {
            case LSONPROC:
                onproc = l;
                break;
            case LSRUN:
                running = l;
                break;
            case LSSLEEP:
                sleeping = l;
                break;
            case LSSTOP:
                stopped = l;
                break;
            case LSSUSPENDED:
                suspended = l;
                break;
            }
        }
        if (signalled)
            return signalled;
        if (onproc)
            return onproc;
        if (running)
            return running;
        if (sleeping)
            return sleeping;
        if (stopped)
            return stopped;
        if (suspended)
            return suspended;
        break;
    case SZOMB:
        /* Doesn't really matter... */
        return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
    case SIDL:
        /*
         * We have more than one LWP and we're in SIDL?
         * How'd that happen?
         */
        panic("Too many LWPs (%d) in SIDL process %d (%s)",
            p->p_nrlwps, p->p_pid, p->p_comm);
    default:
        panic("Process %d (%s) in unknown state %d",
            p->p_pid, p->p_comm, p->p_stat);
#endif
    }

    panic("proc_representative_lwp: couldn't find a lwp for process"
        " %d (%s)", p->p_pid, p->p_comm);
    /* NOTREACHED */
    return NULL;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
    kauth_cred_t oc;
    struct proc *p;

    p = l->l_proc;
    oc = l->l_cred;

    simple_lock(&p->p_lock);
    kauth_cred_hold(p->p_cred);
    l->l_cred = p->p_cred;
    simple_unlock(&p->p_lock);
    if (oc != NULL)
        kauth_cred_free(oc);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

    return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

    specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_initspecific --
 *	Initialize an LWP's specificdata container.
 */
void
lwp_initspecific(struct lwp *l)
{
    int error;

    error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
    KASSERT(error == 0);
}

/*
 * lwp_finispecific --
 *	Finalize an LWP's specificdata container.
 */
void
lwp_finispecific(struct lwp *l)
{

    specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 *	Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 *	only its OWN SPECIFIC DATA.  If it is necessary to access another
 *	LWP's specific data, care must be taken to ensure that doing so
 *	would not cause internal data structure inconsistency (i.e. caller
 *	can guarantee that the target LWP is not inside an lwp_getspecific()
 *	or lwp_setspecific() call).
 */
void *
lwp_getspecific(specificdata_key_t key)
{

    return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
        &curlwp->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(specificdata_key_t key, void *data)
{

    specificdata_setspecific(lwp_specificdata_domain,
        &curlwp->l_specdataref, key, data);
}