/*	$NetBSD: sys_lwp.c,v 1.72 2020/01/25 15:41:52 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.72 2020/01/25 15:41:52 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ptrace.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

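/* Maximum number of LWPs that _lwp_unpark_all() will unpark per call. */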
#define	LWP_UNPARK_MAX		1024

static const stack_t lwp_ss_init = SS_INIT;

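/*
 * Sync object for LWPs blocked in _lwp_park().  Waiters are inserted
 * at the head of the sleep queue (SOBJ_SLEEPQ_LIFO), on the assumption
 * that recently parked LWPs are the most likely to be unparked soon.
 */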
static syncobj_t lwp_park_sobj = {
	.sobj_flag	= SOBJ_SLEEPQ_LIFO,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

static void
mi_startlwp(void *arg)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	(p->p_emul->e_startlwp)(arg);

	/* If the process is traced, report lwp creation to a debugger. */
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
	    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
		/* Paranoid check */
		mutex_enter(proc_lock);
		if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
		    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
			mutex_exit(proc_lock);
			return;
		}

		mutex_enter(p->p_lock);
		eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
	}
}

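/*
 * Back-end for _lwp_create(): allocate the kernel stack (uarea) and
 * create the new LWP in its nascent (LSIDL) state.  The caller is
 * expected to set it running with lwp_start().
 */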
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
    const sigset_t *sigmask, const stack_t *sigstk)
{
	struct proc *p = l->l_proc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	return 0;
}

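/*
 * _lwp_create(2): create a new LWP in the current process from the
 * given ucontext.  The new LWP is created detached if LWP_DETACHED is
 * set in flags, and starts running unless LWP_SUSPENDED is given.
 */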
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwp_t *l2;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
	    &newuc->uc_sigmask : &l->l_sigmask;
	error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
	    &SS_INIT);
	if (error)
		goto fail;

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
	if (error != 0)
		lwp_exit(l2);
	else
		lwp_start(l2, SCARG(uap, flags));
	return error;

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

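/*
 * _lwp_suspend(2): suspend execution of the target LWP, then wait
 * until it is no longer running (or until the wait is interrupted).
 */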
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and L_WSUSPEND clear
	 * o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

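/*
 * _lwp_wakeup(2): make the target LWP return early from an
 * interruptible sleep.  Fails with ENODEV if the target is not
 * sleeping, or EBUSY if its sleep cannot be interrupted.
 */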
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

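/*
 * _lwp_detach(2): detach the target LWP.  A detached LWP cannot be
 * waited for with _lwp_wait(), and its resources are reclaimed
 * automatically when it exits.
 */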
int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

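/*
 * The wait channel an LWP parks on is derived from the owning process
 * and the userland hint (typically the address of the object being
 * synchronised on), so park and unpark operations on the same object
 * within the same process hash to the same sleep queue.
 */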
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	struct timespec start;
	lwp_t *l;
	bool timeremain = !(flags & TIMER_ABSTIME) && ts;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo,
		    timeremain ? &start : NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		if (timeremain)
			memset(ts, 0, sizeof(*ts));
		break;
	case ERESTART:
		error = EINTR;
		/*FALLTHROUGH*/
	default:
		if (timeremain)
			clock_timeleft(clock_id, ts, &start);
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t)		clock_id;
		syscallarg(int)			flags;
		syscallarg(struct timespec *)	ts;
		syscallarg(lwpid_t)		unpark;
		syscallarg(const void *)	hint;
		syscallarg(const void *)	unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
	    SCARG(uap, hint));
	if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
		(void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
	return error;
}
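
/*
 * Illustrative sketch only: userland synchronisation primitives are
 * expected to pair park and unpark, using the address of the sync
 * object as the hint.  A waiter/waker pair might look roughly like
 * this ("obj", "condition_met" and "waiter" are hypothetical):
 *
 *	// waiter: publish self on obj's wait list, then sleep
 *	while (!condition_met(obj))
 *		_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, obj, NULL);
 *
 *	// waker: make the condition true, then release one waiter
 *	_lwp_unpark(waiter, obj);
 *
 * If the unpark races ahead of the park, LW_UNPARKED is left set and
 * the next park returns immediately with EALREADY.
 */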

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else
		tp = kmem_alloc(sz, KM_SLEEP);
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
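
/*
 * Illustrative sketch only: a caller with more waiters than the batch
 * limit is expected to query the limit first (targets == NULL) and
 * then unpark in chunks, e.g. (with hypothetical "waiters", "n" and
 * "hint"):
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *	for (size_t i = 0; i < n; i += max)
 *		_lwp_unpark_all(waiters + i, MIN((size_t)max, n - i), hint);
 */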

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}

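/*
 * _lwp_ctl(2): give the calling LWP a communication area shared with
 * the kernel, and copy its address out to userland.  Only the
 * LWPCTL_FEATURE_CURCPU and LWPCTL_FEATURE_PCTR feature bits are
 * recognised; any other bit yields ENODEV.
 */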
int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}