/*	$NetBSD: sys_lwp.c,v 1.88 2023/10/15 10:27:11 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.88 2023/10/15 10:27:11 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/ptrace.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/syncobj.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

static const stack_t lwp_ss_init = SS_INIT;
/*
 * Parked LWPs get no priority boost on awakening, because they were
 * blocked on user space objects.  Maybe revisit?
 */
syncobj_t lwp_park_syncobj = {
	.sobj_name	= "lwp_park",
	.sobj_flag	= SOBJ_SLEEPQ_NULL,
	.sobj_boostpri	= PRI_USER,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

static void
mi_startlwp(void *arg)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	(p->p_emul->e_startlwp)(arg);

	/* If the process is traced, report lwp creation to a debugger */
	if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
	    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
		/* Paranoid check */
		mutex_enter(&proc_lock);
		if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
		    (PSL_TRACED|PSL_TRACELWP_CREATE)) {
			mutex_exit(&proc_lock);
			return;
		}

		mutex_enter(p->p_lock);
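		/* Note: eventswitch() releases both proc_lock and p->p_lock. */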
		eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
	}
}

int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
    const sigset_t *sigmask, const stack_t *sigstk)
{
	struct proc *p = l->l_proc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwp_t *l2;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
	    &newuc->uc_sigmask : &l->l_sigmask;
	error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
	    &SS_INIT);
	if (error)
		goto fail;

	error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
	if (error == 0) {
		lwp_start(l2, SCARG(uap, flags));
		return 0;
	}
	lwp_exit(l2);
fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}
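
/*
 * Usage sketch (illustrative, not kernel code): a thread library
 * normally reaches this syscall via _lwp_makecontext(3) followed by
 * _lwp_create(2), along these lines (start, arg, stack and stacksize
 * are caller-supplied here):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	_lwp_makecontext(&uc, start, arg, NULL, stack, stacksize);
 *	if (_lwp_create(&uc, 0, &lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_create");
 *
 * The new LWP begins life in mi_startlwp() above, which hands control
 * to the emulation's e_startlwp hook.
 */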

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and LW_WSUSPEND clear
	 * o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}
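
/*
 * Usage sketch (illustrative only): suspending and resuming a sibling
 * LWP.  _lwp_suspend(2) does not return until the target has actually
 * stopped, which is what the cv_wait_sig() loop above implements:
 *
 *	if (_lwp_suspend(lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_suspend");
 *	... inspect or modify the stopped LWP ...
 *	if (_lwp_continue(lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_continue");
 */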

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}
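
/*
 * Illustrative note: _lwp_wakeup(2) aborts an interruptible sleep in
 * the target, e.g. a nanosleep(2) call, which then fails with EINTR.
 * ENODEV ("not sleeping") and EBUSY ("sleeping uninterruptibly") let
 * the caller decide whether to retry; one hypothetical policy:
 *
 *	while (_lwp_wakeup(lid) == -1 && errno == EBUSY)
 *		sched_yield();
 */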

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	return error;
}
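
/*
 * Usage sketch (illustrative only): this is the primitive beneath
 * pthread_kill(3); it directs a signal at a single LWP rather than
 * at the whole process:
 *
 *	if (_lwp_kill(lid, SIGUSR1) == -1)
 *		err(EXIT_FAILURE, "_lwp_kill");
 *
 * As with kill(2), signo == 0 performs only the existence check.
 */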

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		t = proc_find_lwp(p, target);
		KASSERT(t == NULL || t->l_lid == target);
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}
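
/*
 * Usage sketch (illustrative only): a detached LWP reaps itself, so
 * the usual userland pattern is to create it detached and never
 * _lwp_wait(2) for it:
 *
 *	if (_lwp_create(&uc, LWP_DETACHED, &lid) == -1)
 *		err(EXIT_FAILURE, "_lwp_create");
 *
 * Detaching an LWP that has already exited (a zombie) frees it on the
 * spot via lwp_free() above.
 */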

int
lwp_unpark(const lwpid_t *tp, const u_int ntargets)
{
	u_int target;
	kmutex_t *mp;
	int error, s;
	proc_t *p;
	lwp_t *t;

	p = curproc;
	error = 0;

	s = pserialize_read_enter();
	for (target = 0; target < ntargets; target++) {
		t = proc_find_lwp_unlocked(p, tp[target]);
		if (__predict_false(t == NULL)) {
			error = ESRCH;
			continue;
		}

		KASSERT(lwp_locked(t, NULL));

		if (__predict_true(t->l_syncobj == &lwp_park_syncobj)) {
			/* As expected it's parked, so wake it up. */
			mp = t->l_mutex;
			sleepq_remove(NULL, t, true);
			mutex_spin_exit(mp);
		} else if (__predict_false(t->l_stat == LSZOMB)) {
			lwp_unlock(t);
			error = ESRCH;
		} else {
			/*
			 * It hasn't parked yet because the wakeup side won
			 * the race, or something else has happened to make
			 * the thread not park.  Why doesn't really matter.
			 * Set the operation pending, so that the next call
			 * to _lwp_park() in the LWP returns early.  If it
			 * turns out to be a spurious wakeup, no harm done.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}
	}
	pserialize_read_exit(s);

	return error;
}
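
/*
 * The LW_UNPARKED handling above closes the classic lost-wakeup race
 * (sketch, illustrative only): if B's _lwp_unpark(A) lands after A has
 * decided to sleep but before A's _lwp_park() enqueues it, the flag
 * makes A's park return immediately with EALREADY rather than block
 * forever.  In effect, one wakeup is buffered per LWP.
 */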

int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts)
{
	int timo, error;
	struct timespec start;
	lwp_t *l;
	bool timeremain = !(flags & TIMER_ABSTIME) && ts;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo,
		    timeremain ? &start : NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	l = curlwp;
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		return EALREADY;
	}
	sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj, true);
	error = sleepq_block(timo, true, &lwp_park_syncobj, 0);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		if (timeremain)
			memset(ts, 0, sizeof(*ts));
		break;
	case ERESTART:
		error = EINTR;
		/*FALLTHROUGH*/
	default:
		if (timeremain)
			clock_timeleft(clock_id, ts, &start);
		break;
	}
	return error;
}
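
/*
 * Note on the timeout contract: for a relative timeout (ts != NULL,
 * TIMER_ABSTIME clear), *ts is rewritten on the way out: zeroed on
 * ETIMEDOUT, otherwise reduced to the time remaining.  Callers can
 * therefore restart an interrupted park without recomputing the
 * deadline.
 */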

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t)		clock_id;
		syscallarg(int)			flags;
		syscallarg(struct timespec *)	ts;
		syscallarg(lwpid_t)		unpark;
		syscallarg(const void *)	hint;
		syscallarg(const void *)	unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(&SCARG(uap, unpark), 1);
		if (error != 0)
			return error;
	}

	error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp);
	if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
		(void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
	return error;
}
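
/*
 * Usage sketch (illustrative only): park/unpark is the kernel half of
 * a futex-like protocol.  A simplified userland lock might do (lk and
 * its fields are hypothetical):
 *
 *	while (atomic_cas_uint(&lk->locked, 0, 1) != 0) {
 *		lk->waiter = _lwp_self();
 *		(void)_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, lk, NULL);
 *	}
 *	... critical section ...
 *	lk->locked = 0;
 *	if (lk->waiter != 0)
 *		(void)_lwp_unpark(lk->waiter, lk);
 *
 * EALREADY, EINTR and ETIMEDOUT all mean "recheck the lock word";
 * spurious wakeups are benign by design.
 */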

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(&SCARG(uap, target), 1);
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	lwpid_t targets[32], *tp;
	int error;
	u_int ntargets;
	size_t sz;

	ntargets = SCARG(uap, ntargets);
	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(lwpid_t) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else
		tp = kmem_alloc(sz, KM_SLEEP);
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}
	error = lwp_unpark(tp, ntargets);
	if (tp != targets)
		kmem_free(tp, sz);
	return error;
}
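
/*
 * Usage sketch (illustrative only): a broadcast wakeup first asks for
 * the per-call limit (a NULL target array yields LWP_UNPARK_MAX) and
 * then unparks the waiters in blocks (lids, nwaiters and hint are
 * hypothetical):
 *
 *	int max = _lwp_unpark_all(NULL, 0, NULL);
 *
 *	for (size_t i = 0; i < nwaiters; i += (size_t)max)
 *		(void)_lwp_unpark_all(lids + i,
 *		    MIN(nwaiters - i, (size_t)max), hint);
 */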

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}
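
/*
 * Usage sketch (illustrative only): LWP names are a debugging aid,
 * truncated to MAXCOMLEN bytes (including the NUL) as above; a target
 * of 0 selects the calling LWP:
 *
 *	char buf[MAXCOMLEN];
 *
 *	(void)_lwp_setname(0, "worker");
 *	(void)_lwp_getname(0, buf, sizeof(buf));
 */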

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
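
/*
 * Usage sketch (illustrative only): the returned lwpctl block is
 * shared with the kernel and refreshed at context-switch time, giving
 * userland a cheap "which CPU am I on?" probe:
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == -1)
 *		err(EXIT_FAILURE, "_lwp_ctl");
 *	printf("running on cpu %d\n", lc->lc_curcpu);
 */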