/*	$NetBSD: sys_lwp.c,v 1.52.14.1 2012/05/21 15:25:56 riz Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.52.14.1 2012/05/21 15:25:56 riz Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define	LWP_UNPARK_MAX		1024

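/*
 * Sync object and sleep table for LWPs blocked in _lwp_park().  The
 * sleep queues are LIFO: the most recently parked LWP is woken first,
 * on the assumption that it is the most likely to still be cache-warm.
 */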
static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

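/*
 * Common code for _lwp_create() and emulation-specific LWP creation
 * paths: allocate a u-area, create the LWP, and either set it running
 * or leave it stopped/suspended as circumstances dictate.
 */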
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *)	ucp;
		syscallarg(u_long)		flags;
		syscallarg(lwpid_t *)		new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc = NULL;
	lwpid_t lid;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid);
	if (error)
		goto fail;

	/*
	 * Do not free the ucontext in case of an error here;
	 * the LWP will actually run and access it.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
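	/* lwp_continue() will release the LWP lock. */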
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

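/*
 * Make the target LWP return early from an interruptible sleep.  Fails
 * with ENODEV if the target is not sleeping, or EBUSY if its sleep is
 * not interruptible.
 */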
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	wait_for;
		syscallarg(lwpid_t *)	departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

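/*
 * Compute the wait channel for a park operation.  The user-supplied
 * hint (typically the address of the user-level sync object) is mixed
 * with the proc pointer, so that equal hint values used by different
 * processes map to different wait channels.
 */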
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

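/*
 * Unpark a single LWP.  In the common case the target is already
 * asleep on the park sleep queue; otherwise the wakeup is recorded in
 * LW_UNPARKED so that the target's next _lwp_park() returns at once.
 */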
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

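/*
 * Park the calling LWP until it is woken by lwp_unpark(), interrupted,
 * or the (absolute) timeout expires.  A pending unpark or cancellation
 * makes the call return EALREADY without sleeping.
 */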
int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *)	ts;
		syscallarg(lwpid_t)			unpark;
		syscallarg(const void *)		hint;
		syscallarg(const void *)		unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}
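
/*
 * Illustrative sketch (not part of this file): a userland lock built
 * on this interface would typically park in a retry loop, since
 * _lwp_park() may return early with EALREADY (pending unpark), EINTR,
 * or ETIMEDOUT.  try_lock() and set_waiter() below are hypothetical:
 *
 *	while (!try_lock(mtx)) {
 *		set_waiter(mtx, _lwp_self());
 *		(void)_lwp_park(NULL, 0, mtx, NULL);
 *	}
 *
 * The matching release path would clear the lock word and call
 * _lwp_unpark() for a waiter, passing the same hint address.
 */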

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

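/*
 * Unpark a batch of LWPs, at most LWP_UNPARK_MAX at a time.  Called
 * with targets == NULL, it simply reports that limit so the caller
 * can split larger sets into blocks.
 */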
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

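/*
 * Set the name of the target LWP.  Names longer than MAXCOMLEN - 1
 * characters are silently truncated.
 */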
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

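/*
 * Map an lwpctl block into the caller's address space.  Only the
 * feature bits we implement (LWPCTL_FEATURE_CURCPU, _PCTR) may be
 * requested; anything else fails with ENODEV.
 */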
int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}