/*	$NetBSD: sys_lwp.c,v 1.73 2020/01/26 19:08:09 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.73 2020/01/26 19:08:09 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ptrace.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX	1024

static const stack_t lwp_ss_init = SS_INIT;

static syncobj_t lwp_park_sobj = {
	.sobj_flag = SOBJ_SLEEPQ_LIFO,
	.sobj_unsleep = sleepq_unsleep,
	.sobj_changepri = sleepq_changepri,
	.sobj_lendpri = sleepq_lendpri,
	.sobj_owner = syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

static void
mi_startlwp(void *arg)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	(p->p_emul->e_startlwp)(arg);

	/* If the process is traced, report lwp creation to a debugger */
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
	    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
		/* Paranoid check */
		mutex_enter(proc_lock);
		if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
		    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
			mutex_exit(proc_lock);
			return;
		}

		mutex_enter(p->p_lock);
		eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
	}
}

int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
    const sigset_t *sigmask, const stack_t *sigstk)
{
	struct proc *p = l->l_proc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwp_t *l2;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
	    &newuc->uc_sigmask : &l->l_sigmask;
	error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
	    &SS_INIT);
	if (error)
		goto fail;

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
	if (error == 0) {
		lwp_start(l2, SCARG(uap, flags));
		return 0;
	}
	lwp_exit(l2);
fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}
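
/*
 * Example: a minimal userland sketch of driving the _lwp_create() path
 * above, roughly as libpthread does when starting a thread.  It assumes
 * only the documented _lwp_makecontext(3) and _lwp_create(2) interfaces;
 * the names "start", "spawn" and the 64KB stack size are illustrative.
 *
 *	#include <lwp.h>
 *	#include <stdlib.h>
 *	#include <ucontext.h>
 *
 *	static void
 *	start(void *arg)
 *	{
 *		// ... thread body ...
 *		_lwp_exit();
 *	}
 *
 *	static lwpid_t
 *	spawn(void *arg)
 *	{
 *		ucontext_t uc;
 *		lwpid_t lid;
 *		void *stack = malloc(1 << 16);
 *
 *		// Fill in uc so the new LWP enters start(arg) on "stack".
 *		_lwp_makecontext(&uc, start, arg, NULL, stack, 1 << 16);
 *		if (_lwp_create(&uc, 0, &lid) == -1)
 *			abort();
 *		return lid;
 *	}
 */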

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}
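
/*
 * Example: a userland sketch of the suspend/continue pair, assuming the
 * documented _lwp_suspend(2)/_lwp_continue(2) wrappers and <err.h>; "lid"
 * is a LWP id obtained elsewhere (e.g. from the spawn() sketch above).
 * _lwp_suspend() does not return until the target has actually stopped,
 * courtesy of the wait loop above, and fails with EDEADLK when the last
 * runnable LWP in a process tries to suspend itself.
 *
 *	if (_lwp_suspend(lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_suspend");
 *	// ... the target is stopped here ...
 *	if (_lwp_continue(lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_continue");
 */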

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}
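
/*
 * Example: a userland sketch of reaping LWPs with _lwp_wait(2), which
 * parallels waitpid(2) for processes.  A wait_for of 0 waits for any
 * undetached LWP to terminate; the departed LWP's id is written back.
 *
 *	lwpid_t departed;
 *
 *	while (_lwp_wait(0, &departed) == 0)
 *		printf("lwp %d terminated\n", departed);
 */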

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

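/*
 * Compute the wait channel for a park operation.  The channel only needs
 * to spread parked LWPs across the sleep table buckets, so the proc
 * pointer is XORed into the user-supplied hint (typically the address of
 * the user-level sync object); identical hints from different processes
 * thus land on distinct channels.
 */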
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	struct timespec start;
	lwp_t *l;
	bool timeremain = !(flags & TIMER_ABSTIME) && ts;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo,
		    timeremain ? &start : NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		if (timeremain)
			memset(ts, 0, sizeof(*ts));
		break;
	case ERESTART:
		error = EINTR;
		/*FALLTHROUGH*/
	default:
		if (timeremain)
			clock_timeleft(clock_id, ts, &start);
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
	    SCARG(uap, hint));
	if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
		(void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
	return error;
}
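
/*
 * Example: a userland sketch of a wait/wake primitive built on
 * _lwp_park(2)/_lwp_unpark(2), roughly as libpthread uses them; the
 * variables below are illustrative.  If the unpark arrives before the
 * park, LW_UNPARKED (set in lwp_unpark() above) makes the park return
 * immediately with EALREADY instead of sleeping.
 *
 *	static volatile int wanted;	// the condition being waited for
 *	static lwpid_t waiter;		// id of the waiting LWP
 *	static int obj;			// only its address is used, as hint
 *
 *	// Waiting side: publish ourself, re-check, then park.
 *	waiter = _lwp_self();
 *	while (!wanted) {
 *		if (_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, &obj, NULL)
 *		    == -1 && errno != EALREADY && errno != EINTR)
 *			err(EXIT_FAILURE, "_lwp_park");
 *	}
 *
 *	// Waking side: satisfy the condition, then unpark by LWP id,
 *	// passing the same hint so the sleep queue search stays cheap.
 *	wanted = 1;
 *	(void)_lwp_unpark(waiter, &obj);
 */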

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else
		tp = kmem_alloc(sz, KM_SLEEP);
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
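
/*
 * Example: a userland sketch of a broadcast wakeup with
 * _lwp_unpark_all(2).  Per the NULL-targets convention above, a first
 * call returns the per-call limit (LWP_UNPARK_MAX), so large sets can be
 * fed through in blocks; "waiters", "nwaiters" and "obj" are illustrative.
 *
 *	extern lwpid_t waiters[];	// ids collected by the waiters
 *	extern size_t nwaiters;
 *	extern int obj;			// the shared hint object
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *
 *	for (size_t i = 0; i < nwaiters; i += max)
 *		(void)_lwp_unpark_all(waiters + i,
 *		    MIN((size_t)max, nwaiters - i), &obj);
 */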

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}