/*	$NetBSD: kern_lwp.c,v 1.41 2006/10/08 04:28:44 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.41 2006/10/08 04:28:44 thorpej Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

POOL_INIT(lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

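/*
 * Initialize the LWP subsystem: create the specificdata domain used
 * to hang per-LWP data off of LWPs.
 */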
void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
}

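/*
 * Create a new LWP in the current process from the supplied ucontext,
 * leave it runnable or suspended as requested, and copy the new LWP's
 * ID out to the caller.
 */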
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	if (p->p_flag & P_SA)
		return EINVAL;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc,
	    l->l_proc->p_emul->e_sa->sae_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		p->p_nrlwps++;
		SCHED_UNLOCK(s);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error) {
		/* XXX We should destroy the LWP. */
		return (error);
	}

	return (0);
}


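/*
 * Terminate the calling LWP.  Does not return.
 */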
int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


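/*
 * Return the LWP ID of the calling LWP.
 */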
int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


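/*
 * Return the calling LWP's private data pointer.
 */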
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t) l->l_private;

	return (0);
}


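/*
 * Set the calling LWP's private data pointer.
 */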
int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);

	return (0);
}


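/*
 * Suspend the target LWP.  Fails with EDEADLK if suspending the
 * calling LWP would leave no other LWP in the process able to run.
 */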
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;
	struct lwp *t2;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL) /* All other LWPs are suspended */
			return (EDEADLK);
	}

	return lwp_suspend(l, t);
}

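/*
 * Suspend an LWP.  If the target is the caller itself, switch away
 * immediately; otherwise move the target to LSSUSPENDED according to
 * its current state.
 */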
inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
	struct proc *p = t->l_proc;
	int s;

	if (t == l) {
		SCHED_LOCK(s);
		KASSERT(l->l_stat == LSONPROC);
		l->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		/* XXX NJWLWP check if this makes sense here: */
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			p->p_nrlwps--;
			SCHED_UNLOCK(s);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does..... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			/* XXX multiprocessor LWPs? Implement me! */
			return (EINVAL);
		}
	}

	return (0);
}


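/*
 * Resume execution of a suspended LWP in the current process.
 */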
int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int s, target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	SCHED_LOCK(s);
	lwp_continue(t);
	SCHED_UNLOCK(s);

	return (0);
}

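/*
 * Make a suspended LWP runnable again, or put it back to sleep if it
 * was sleeping when it was suspended.  Called with the scheduler lock
 * held (see sys__lwp_continue()).
 */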
void
lwp_continue(struct lwp *l)
{

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_stat != LSSUSPENDED)
		return;

	if (l->l_wchan == 0) {
		/* LWP was runnable before being suspended. */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}

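/*
 * Wake a sleeping LWP in the current process.  The target must be in
 * an interruptible sleep; L_CANCELLED tells ltsleep() to return early.
 */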
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int error;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	SCHED_LOCK(s);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL) {
		error = ESRCH;
		goto exit;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto exit;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto exit;
	}
	/*
	 * Tell ltsleep to wakeup.
	 */
	t->l_flag |= L_CANCELLED;

	setrunnable(t);
	error = 0;
exit:
	SCHED_UNLOCK(s);

	return error;
}

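/*
 * Wait for an LWP in the current process to exit, and report which
 * one departed.
 */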
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}


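/*
 * Wait for an LWP to exit.  If lid is zero, wait for any undetached
 * LWP; otherwise wait for the specified LWP.  Returns EDEADLK if the
 * wait could never be satisfied.
 */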
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, wpri;
	static const char waitstr1[] = "lwpwait";
	static const char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			simple_lock(&p->p_lock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lock);
			/* XXX decrement limits */

			specificdata_fini(lwp_specificdata_domain,
			    &l2->l_specdataref);
			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		    l2->l_stat == LSSUSPENDED) {
			/* Deadlock checks.
			 * 1. If all other LWPs are waiting for exits
			 *    or suspended, we would deadlock.
			 */

			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
				      l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/* XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the lwp that is
			 * being waited for. wchan is already filled with
			 * &p->p_nlwps, and putting the lwp address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later. Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


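/*
 * Create a new LWP in process p2, using LWP l1 as a template.  The new
 * LWP is created in the LSIDL state and left for the caller to make
 * runnable.
 */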
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s, error;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	error = specificdata_init(lwp_specificdata_domain, &l2->l_specdataref);
	KASSERT(error == 0);

	memset(&l2->l_startzero, 0,
	    (unsigned) ((caddr_t)&l2->l_endzero -
	    (caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	    (unsigned) ((caddr_t)&l2->l_endcopy -
	    (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all processes will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * zero child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}


/*
 * Quit the process.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch away.
 * Calling with l!=curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on).  We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	proclist_unlock_write(s);

	/*
	 * Release our cached credentials, and collate accounting flags.
	 */
	kauth_cred_free(l->l_cred);
	simple_lock(&p->p_lock);
	p->p_acflag |= l->l_acflag;
	simple_unlock(&p->p_lock);

	/* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	pmap_deactivate(l);

	if (l->l_flag & L_DETACHED) {
		simple_lock(&p->p_lock);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		simple_unlock(&p->p_lock);

		curlwp = NULL;
		l->l_proc = NULL;
	}

	SCHED_LOCK(s);
	p->p_nrlwps--;
	l->l_stat = LSDEAD;
	SCHED_UNLOCK(s);

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead process's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
	struct proc *p;

	KERNEL_LOCK(LK_EXCLUSIVE);
	/*
	 * Free the VM resources we're still holding on to.
	 */
	uvm_lwp_exit(l);

	if (l->l_flag & L_DETACHED) {
		/* Nobody waits for detached LWPs. */
		specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
		pool_put(&lwp_pool, l);
		KERNEL_UNLOCK();
	} else {
		l->l_stat = LSZOMB;
		p = l->l_proc;
		p->p_nzlwps++;
		KERNEL_UNLOCK();
		wakeup(&p->p_nlwps);
	}
}

/*
 * Pick a LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with a LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (signalled)
			return signalled;
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/* We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nrlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find a lwp for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	simple_lock(&p->p_lock);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	simple_unlock(&p->p_lock);
	if (oc != NULL)
		kauth_cred_free(oc);
}

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain,
	    keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 * Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 * only its OWN SPECIFIC DATA.  If it is necessary to access another
 * LWP's specific data, care must be taken to ensure that doing so
 * would not cause internal data structure inconsistency (i.e. caller
 * can guarantee that the target LWP is not inside an lwp_getspecific()
 * or lwp_setspecific() call).
 */
void *
lwp_getspecific(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(struct lwp *l, specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &l->l_specdataref, key, data);
}