/*	$NetBSD: kern_lwp.c,v 1.43 2006/10/09 00:39:06 martin Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.43 2006/10/09 00:39:06 martin Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

POOL_INIT(lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
    &pool_allocator_nointr);
POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
    &pool_allocator_nointr);

static specificdata_domain_t lwp_specificdata_domain;

struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) if (lwp_debug) printf x
#else
#define DPRINTF(x)
#endif

void
lwpinit(void)
{

	lwp_specificdata_domain = specificdata_domain_create();
	KASSERT(lwp_specificdata_domain != NULL);
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int s, error;

	if (p->p_flag & P_SA)
		return EINVAL;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc,
	    l->l_proc->p_emul->e_sa->sae_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return (error);
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return (ENOMEM);
	}

	/* XXX flags:
	 * __LWP_ASLWP is probably needed for Solaris compat.
	 */

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
		SCHED_LOCK(s);
		l2->l_stat = LSRUN;
		setrunqueue(l2);
		p->p_nrlwps++;
		SCHED_UNLOCK(s);
	} else {
		l2->l_stat = LSSUSPENDED;
	}

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
	    sizeof(l2->l_lid));
	if (error) {
		/* XXX We should destroy the LWP. */
		return (error);
	}

	return (0);
}
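
/*
 * For illustration, a hedged sketch of the matching userland sequence
 * (per _lwp_create(2) and _lwp_makecontext(3); treat the exact
 * signatures, and the names start_func/arg/stack below, as assumptions
 * rather than a definitive recipe):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start_func, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, 0, &lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_create");
 *	_lwp_wait(lid, NULL);		-- reap it, unless LWP_DETACHED
 */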


int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	/* NOTREACHED */
	return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;

	return (0);
}


int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t) l->l_private;

	return (0);
}


int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);

	return (0);
}


int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;
	struct lwp *t2;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	if (t == l) {
		/*
		 * Check for deadlock, which is only possible
		 * when we're suspending ourselves.
		 */
		LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
			if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
				break;
		}

		if (t2 == NULL)	/* All other LWPs are suspended */
			return (EDEADLK);
	}

	return lwp_suspend(l, t);
}

inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
	struct proc *p = t->l_proc;
	int s;

	if (t == l) {
		SCHED_LOCK(s);
		KASSERT(l->l_stat == LSONPROC);
		l->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		/* XXX NJWLWP check if this makes sense here: */
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(l, NULL);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	} else {
		switch (t->l_stat) {
		case LSSUSPENDED:
			return (0); /* _lwp_suspend() is idempotent */
		case LSRUN:
			SCHED_LOCK(s);
			remrunqueue(t);
			t->l_stat = LSSUSPENDED;
			p->p_nrlwps--;
			SCHED_UNLOCK(s);
			break;
		case LSSLEEP:
			t->l_stat = LSSUSPENDED;
			break;
		case LSIDL:
		case LSZOMB:
			return (EINTR); /* It's what Solaris does... */
		case LSSTOP:
			panic("_lwp_suspend: Stopped LWP in running process!");
			break;
		case LSONPROC:
			/* XXX multiprocessor LWPs? Implement me! */
			return (EINVAL);
		}
	}

	return (0);
}


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int s, target_lid;
	struct proc *p = l->l_proc;
	struct lwp *t;

	if (p->p_flag & P_SA)
		return EINVAL;

	target_lid = SCARG(uap, target);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL)
		return (ESRCH);

	SCHED_LOCK(s);
	lwp_continue(t);
	SCHED_UNLOCK(s);

	return (0);
}

void
lwp_continue(struct lwp *l)
{

	DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
	    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
	    l->l_wchan));

	if (l->l_wchan == NULL) {
		/* LWP was runnable before being suspended. */
		setrunnable(l);
	} else {
		/* LWP was sleeping before being suspended. */
		l->l_stat = LSSLEEP;
	}
}
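
/*
 * Usage sketch (hypothetical, from another LWP in the same process):
 * _lwp_suspend(2) parks the target in LSSUSPENDED and _lwp_continue(2)
 * undoes it; lwp_continue() above decides, via l_wchan, whether the
 * target goes back on the run queue or back to LSSLEEP:
 *
 *	if (_lwp_suspend(lid) == 0) {
 *		... inspect or otherwise leave the target parked ...
 *		_lwp_continue(lid);
 *	}
 */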

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	lwpid_t target_lid;
	struct lwp *t;
	struct proc *p;
	int error;
	int s;

	p = l->l_proc;
	target_lid = SCARG(uap, target);

	SCHED_LOCK(s);

	LIST_FOREACH(t, &p->p_lwps, l_sibling)
		if (t->l_lid == target_lid)
			break;

	if (t == NULL) {
		error = ESRCH;
		goto exit;
	}

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto exit;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto exit;
	}
	/*
	 * Tell ltsleep to wake up.
	 */
	t->l_flag |= L_CANCELLED;

	setrunnable(t);
	error = 0;
 exit:
	SCHED_UNLOCK(s);

	return error;
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	int error;
	lwpid_t dep;

	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	if (error)
		return (error);

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed),
		    sizeof(dep));
		if (error)
			return (error);
	}

	return (0);
}


int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
	struct proc *p = l->l_proc;
	struct lwp *l2, *l3;
	int nfound, error, wpri;
	static const char waitstr1[] = "lwpwait";
	static const char waitstr2[] = "lwpwait2";

	DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
	    p->p_pid, l->l_lid, lid));

	if (lid == l->l_lid)
		return (EDEADLK); /* Waiting for ourselves makes no sense. */

	wpri = PWAIT |
	    ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
	nfound = 0;
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
		    ((lid != 0) && (lid != l2->l_lid)))
			continue;

		nfound++;
		if (l2->l_stat == LSZOMB) {
			if (departed)
				*departed = l2->l_lid;

			simple_lock(&p->p_lock);
			LIST_REMOVE(l2, l_sibling);
			p->p_nlwps--;
			p->p_nzlwps--;
			simple_unlock(&p->p_lock);
			/* XXX decrement limits */

			/*
			 * Tear down the zombie's (l2's, not our own)
			 * lwp-specific data before freeing it.
			 */
			specificdata_fini(lwp_specificdata_domain,
			    &l2->l_specdataref);
			pool_put(&lwp_pool, l2);

			return (0);
		} else if (l2->l_stat == LSSLEEP ||
		    l2->l_stat == LSSUSPENDED) {
			/*
			 * Deadlock checks.
			 * 1. If all other LWPs are waiting for exits
			 *    or suspended, we would deadlock.
			 */
			LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
				if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
				    !(l3->l_stat == LSSLEEP &&
				    l3->l_wchan == (caddr_t) &p->p_nlwps))
					break;
			}
			if (l3 == NULL) /* Everyone else is waiting. */
				return (EDEADLK);

			/* XXX we'd like to check for a cycle of waiting
			 * LWPs (specific LID waits, not any-LWP waits)
			 * and detect that sort of deadlock, but we don't
			 * have a good place to store the lwp that is
			 * being waited for. wchan is already filled with
			 * &p->p_nlwps, and putting the lwp address in
			 * there for deadlock tracing would require
			 * exiting LWPs to call wakeup on both their
			 * own address and &p->p_nlwps, to get threads
			 * sleeping on any LWP exiting.
			 *
			 * Revisit later. Maybe another auxiliary
			 * storage location associated with sleeping
			 * is in order.
			 */
		}
	}

	if (nfound == 0)
		return (ESRCH);

	if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
	    (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
		return (error);

	goto loop;
}


int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
	struct lwp *l2;
	int s;

	l2 = pool_get(&lwp_pool, PR_WAITOK);

	l2->l_stat = LSIDL;
	l2->l_forw = l2->l_back = NULL;
	l2->l_proc = p2;

	lwp_initspecific(l2);

	memset(&l2->l_startzero, 0,
	    (unsigned) ((caddr_t)&l2->l_endzero -
		(caddr_t)&l2->l_startzero));
	memcpy(&l2->l_startcopy, &l1->l_startcopy,
	    (unsigned) ((caddr_t)&l2->l_endcopy -
		(caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
	/*
	 * In the single-processor case, all LWPs will always run
	 * on the same CPU.  So, initialize the child's CPU to the parent's
	 * now.  In the multiprocessor case, the child's CPU will be
	 * initialized in the low-level context switch code when the
	 * process runs.
	 */
	KASSERT(l1->l_cpu != NULL);
	l2->l_cpu = l1->l_cpu;
#else
	/*
	 * Zero the child's CPU pointer so we don't get trash.
	 */
	l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

	l2->l_flag = inmem ? L_INMEM : 0;
	l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

	lwp_update_creds(l2);
	callout_init(&l2->l_tsleep_ch);

	if (rnewlwpp != NULL)
		*rnewlwpp = l2;

	l2->l_addr = UAREA_TO_USER(uaddr);
	uvm_lwp_fork(l1, l2, stack, stacksize, func,
	    (arg != NULL) ? arg : l2);

	simple_lock(&p2->p_lock);
	l2->l_lid = ++p2->p_nlwpid;
	LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
	p2->p_nlwps++;
	simple_unlock(&p2->p_lock);

	/* XXX should be locked differently... */
	s = proclist_lock_write();
	LIST_INSERT_HEAD(&alllwp, l2, l_list);
	proclist_unlock_write(s);

	if (p2->p_emul->e_lwp_fork)
		(*p2->p_emul->e_lwp_fork)(l1, l2);

	return (0);
}


/*
 * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch
 * away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
	DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
	    p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

	if (p->p_emul->e_lwp_exit)
		(*p->p_emul->e_lwp_exit)(l);

	/*
	 * If we are the last live LWP in a process, we need to exit
	 * the entire process (if that's not already going on).  We do
	 * so with an exit status of zero, because it's a "controlled"
	 * exit, and because that's what Solaris does.
	 */
	if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
		DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
		    p->p_pid, l->l_lid));
		exit1(l, 0);
		/* NOTREACHED */
	}

	s = proclist_lock_write();
	LIST_REMOVE(l, l_list);
	proclist_unlock_write(s);

	/*
	 * Release our cached credentials, and collate accounting flags.
	 */
	kauth_cred_free(l->l_cred);
	simple_lock(&p->p_lock);
	p->p_acflag |= l->l_acflag;
	simple_unlock(&p->p_lock);

	/* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
	cpu_lwp_free(l, 0);
#endif

	pmap_deactivate(l);

	if (l->l_flag & L_DETACHED) {
		simple_lock(&p->p_lock);
		LIST_REMOVE(l, l_sibling);
		p->p_nlwps--;
		simple_unlock(&p->p_lock);

		curlwp = NULL;
		l->l_proc = NULL;
	}

	SCHED_LOCK(s);
	p->p_nrlwps--;
	l->l_stat = LSDEAD;
	SCHED_UNLOCK(s);

	/* This LWP no longer needs to hold the kernel lock. */
	KERNEL_PROC_UNLOCK(l);

	/* cpu_exit() will not return */
	cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
	struct proc *p;

	KERNEL_LOCK(LK_EXCLUSIVE);
	/*
	 * Free the VM resources we're still holding on to.
	 */
	uvm_lwp_exit(l);

	if (l->l_flag & L_DETACHED) {
		/* Nobody waits for detached LWPs. */
		specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
		pool_put(&lwp_pool, l);
		KERNEL_UNLOCK();
	} else {
		l->l_stat = LSZOMB;
		p = l->l_proc;
		p->p_nzlwps++;
		KERNEL_UNLOCK();
		wakeup(&p->p_nlwps);
	}
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
	struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
	struct lwp *signalled;

	/* Trivial case: only one LWP */
	if (p->p_nlwps == 1)
		return (LIST_FIRST(&p->p_lwps));

	switch (p->p_stat) {
	case SSTOP:
	case SACTIVE:
		/* Pick the most live LWP */
		onproc = running = sleeping = stopped = suspended = NULL;
		signalled = NULL;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_lid == p->p_sigctx.ps_lwp)
				signalled = l;
			switch (l->l_stat) {
			case LSONPROC:
				onproc = l;
				break;
			case LSRUN:
				running = l;
				break;
			case LSSLEEP:
				sleeping = l;
				break;
			case LSSTOP:
				stopped = l;
				break;
			case LSSUSPENDED:
				suspended = l;
				break;
			}
		}
		if (signalled)
			return signalled;
		if (onproc)
			return onproc;
		if (running)
			return running;
		if (sleeping)
			return sleeping;
		if (stopped)
			return stopped;
		if (suspended)
			return suspended;
		break;
	case SZOMB:
		/* Doesn't really matter... */
		return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
	case SIDL:
		/*
		 * We have more than one LWP and we're in SIDL?
		 * How'd that happen?
		 */
		panic("Too many LWPs (%d) in SIDL process %d (%s)",
		    p->p_nrlwps, p->p_pid, p->p_comm);
	default:
		panic("Process %d (%s) in unknown state %d",
		    p->p_pid, p->p_comm, p->p_stat);
#endif
	}

	panic("proc_representative_lwp: couldn't find an LWP for process"
	    " %d (%s)", p->p_pid, p->p_comm);
	/* NOTREACHED */
	return NULL;
}

/*
 * Update an LWP's cached credentials to mirror the process' master copy.
 *
 * This happens early in the syscall path, on user trap, and on LWP
 * creation.  A long-running LWP can also voluntarily choose to update
 * its credentials by calling this routine.  This may be called from
 * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
 */
void
lwp_update_creds(struct lwp *l)
{
	kauth_cred_t oc;
	struct proc *p;

	p = l->l_proc;
	oc = l->l_cred;

	simple_lock(&p->p_lock);
	kauth_cred_hold(p->p_cred);
	l->l_cred = p->p_cred;
	simple_unlock(&p->p_lock);
	if (oc != NULL)
		kauth_cred_free(oc);
}
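
/*
 * For reference, the caller-side fast path mentioned above looks
 * roughly like this (a sketch of the LWP_CACHE_CREDS() idiom; see
 * <sys/lwp.h> for the authoritative definition):
 *
 *	#define LWP_CACHE_CREDS(l, p)				\
 *	do {							\
 *		if ((l)->l_cred != (p)->p_cred)			\
 *			lwp_update_creds(l);			\
 *	} while (0)
 */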

/*
 * lwp_specific_key_create --
 *	Create a key for subsystem lwp-specific data.
 */
int
lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(lwp_specificdata_domain,
	    keyp, dtor));
}

/*
 * lwp_specific_key_delete --
 *	Delete a key for subsystem lwp-specific data.
 */
void
lwp_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(lwp_specificdata_domain, key);
}

void
lwp_initspecific(struct lwp *l)
{
	int error;

	error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
	KASSERT(error == 0);
}

/*
 * lwp_getspecific --
 *	Return lwp-specific data corresponding to the specified key.
 *
 * Note: LWP specific data is NOT INTERLOCKED.  An LWP should access
 * only its OWN SPECIFIC DATA.  If it is necessary to access another
 * LWP's specific data, care must be taken to ensure that doing so
 * would not cause internal data structure inconsistency (i.e. caller
 * can guarantee that the target LWP is not inside an lwp_getspecific()
 * or lwp_setspecific() call).
 */
void *
lwp_getspecific(struct lwp *l, specificdata_key_t key)
{

	return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
	    &l->l_specdataref, key));
}

/*
 * lwp_setspecific --
 *	Set lwp-specific data corresponding to the specified key.
 */
void
lwp_setspecific(struct lwp *l, specificdata_key_t key, void *data)
{

	specificdata_setspecific(lwp_specificdata_domain,
	    &l->l_specdataref, key, data);
}
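/*
 * Example (a hedged sketch; the subsystem "foo" and every foo_* name
 * below are hypothetical): a typical consumer creates a key once, tags
 * LWPs lazily, and lets the destructor clean up when the LWP's
 * specificdata is torn down by specificdata_fini():
 *
 *	static specificdata_key_t foo_key;
 *
 *	static void
 *	foo_dtor(void *v)
 *	{
 *		free(v, M_TEMP);	-- or whatever allocator foo uses
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		if (lwp_specific_key_create(&foo_key, foo_dtor) != 0)
 *			panic("foo_init: no lwp-specific data key");
 *	}
 *
 *	void *
 *	foo_get(struct lwp *l)
 *	{
 *		return lwp_getspecific(l, foo_key);	-- l == curlwp;
 *							-- see note above
 *	}
 */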