/*	$NetBSD: sys_lwp.c,v 1.1.2.9 2007/01/25 20:20:28 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.1.2.9 2007/01/25 20:20:28 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

sleeptab_t lwp_park_tab;

#ifdef LWP_COUNTERS
struct evcnt lwp_ev_park_early = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "unparked early");
struct evcnt lwp_ev_park_raced = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "raced");
struct evcnt lwp_ev_park_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "not parked");
struct evcnt lwp_ev_park_bcast = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "broadcast unpark");
struct evcnt lwp_ev_park_targ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "targeted unpark");
struct evcnt lwp_ev_park = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "parked");

#define	LWP_COUNT(ev, val)	(ev).ev_count += (val)	/* XXXSMP */
#else
#define	LWP_COUNT(ev, val)	/* nothing */
#endif
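
/*
 * When built with LWP_COUNTERS, the counters above are registered at
 * boot time (see lwp_sys_init() below) and should then be visible from
 * userland alongside the kernel's other evcnt(9) counters, e.g. in the
 * output of vmstat(1) -e.
 */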

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
#ifdef LWP_COUNTERS
	evcnt_attach_static(&lwp_ev_park_early);
	evcnt_attach_static(&lwp_ev_park_raced);
	evcnt_attach_static(&lwp_ev_park_miss);
	evcnt_attach_static(&lwp_ev_park_bcast);
	evcnt_attach_static(&lwp_ev_park_targ);
	evcnt_attach_static(&lwp_ev_park);
#endif
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	boolean_t inmem;
	ucontext_t *newuc;
	int error, lid;

	mutex_enter(&p->p_smutex);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(&p->p_smutex);
		return EINVAL;
	}
	p->p_sflag |= PS_NOSA;
	mutex_exit(&p->p_smutex);

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc,
	    l->l_proc->p_emul->e_sa->sae_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, startlwp, newuc, &l2);

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	lid = l2->l_lid;
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (L_WREBOOT | L_WSUSPEND | L_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			LOCK_ASSERT(lwp_locked(l2, &sched_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			setrunqueue(l2);
		}
	} else
		l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error)
		return error;

	return 0;
}
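
/*
 * A minimal userland sketch of exercising this syscall through the libc
 * stubs declared in <lwp.h>.  Illustrative only: error checking is
 * elided and the stack size is an arbitrary choice.
 *
 *	#include <lwp.h>
 *	#include <ucontext.h>
 *
 *	static char stack[4 * 4096];
 *
 *	static void
 *	start(void *arg)
 *	{
 *		_lwp_exit();
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		ucontext_t uc;
 *		lwpid_t lid;
 *
 *		getcontext(&uc);
 *		_lwp_makecontext(&uc, start, NULL, NULL, stack,
 *		    sizeof(stack));
 *		_lwp_create(&uc, 0, &lid);
 *		_lwp_wait(lid, NULL);
 *		return 0;
 *	}
 *
 * With flags == 0 the LWP is created running and joinable, so it can be
 * reaped with _lwp_wait(); LWP_DETACHED or LWP_SUSPENDED change that as
 * described in the code above.
 */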

int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * decremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (L_WCORE | L_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(&p->p_smutex);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(&p->p_smutex);

	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(&p->p_smutex);
		return EINVAL;
	}

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(&p->p_smutex);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto bad;
	}

	if ((t->l_flag & L_SINTR) == 0) {
		error = EBUSY;
		goto bad;
	}

	/* Wake it up.  setrunnable() will release the LWP lock. */
	t->l_flag |= L_CANCELLED;
	setrunnable(t);
	mutex_exit(&p->p_smutex);
	return 0;

 bad:
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);
	return error;
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(&p->p_smutex);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_kill_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proclist_mutex);
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(&p->p_smutex);
	mutex_exit(&proclist_mutex);

	return error;
}

int
sys__lwp_detach(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_detach_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(&p->p_smutex);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				lwp_free(t, 0, 0);	/* releases proc mutex */
				cv_broadcast(&p->p_lwpcv);
				return 0;
			}
			error = 0;
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(&p->p_smutex);
	cv_broadcast(&p->p_lwpcv);

	return error;
}

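/*
 * Compute the wait channel for a park operation.  The user-supplied
 * hint (typically the address of the user-level sync object) is XORed
 * with the process pointer, so identical hint values from different
 * processes hash to different sleep queues.
 */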
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{
	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys__lwp_park(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_park_args /* {
		syscallarg(const struct timespec *) ts;
		syscallarg(ucontext_t *) uc;
		syscallarg(const void *) hint;
	} */ *uap = v;
	const struct timespec *tsp;
	struct timespec ts, tsx;
	struct timeval tv;
	sleepq_t *sq;
	wchan_t wchan;
	int timo, error;

	/* Fix up the given timeout value. */
	if ((tsp = SCARG(uap, ts)) != NULL) {
		if ((error = copyin(tsp, &ts, sizeof(ts))) != 0)
			return error;
		getnanotime(&tsx);
		timespecsub(&ts, &tsx, &ts);
		tv.tv_sec = ts.tv_sec;
		tv.tv_usec = ts.tv_nsec / 1000;
		if (tv.tv_sec < 0 || (tv.tv_sec == 0 && tv.tv_usec <= 0))
			return ETIMEDOUT;
		if ((error = itimerfix(&tv)) != 0)
			return error;
		timo = tvtohz(&tv);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	wchan = lwp_park_wchan(l->l_proc, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	if ((l->l_flag & L_CANCELLED) != 0) {
		sleepq_lwp_lock(l);
		l->l_flag &= ~L_CANCELLED;
		sleepq_lwp_unlock(l);
		sleepq_unlock(sq);
		LWP_COUNT(lwp_ev_park_early, 1);
		return EALREADY;
	}

	/*
	 * For now we ignore the ucontext argument.  In the future, we may
	 * put our stack up to be recycled.  If it's binned, a trampoline
	 * function could call sleepq_unblock() on our behalf.
	 */
	LWP_COUNT(lwp_ev_park, 1);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), wchan, "parked", timo, 1,
	    &lwp_park_sobj);
	error = sleepq_unblock(timo, 1);
	return error == EWOULDBLOCK ? ETIMEDOUT : error;
}
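
/*
 * A rough sketch of how a userland thread library might pair this with
 * _lwp_unpark() to build a blocking lock.  Illustrative only: the
 * try_acquire(), enqueue_self() and dequeue_waiter() helpers are
 * hypothetical, and real pthread code is considerably more careful.
 *
 *	while (!try_acquire(lock)) {
 *		enqueue_self(lock);
 *		(void)_lwp_park(NULL, NULL, lock);
 *	}
 *
 * and, on release:
 *
 *	if ((waiter = dequeue_waiter(lock)) != 0)
 *		_lwp_unpark(waiter, lock);
 *
 * If the unpark arrives before the park, the L_CANCELLED handling above
 * makes the park return immediately with EALREADY, so the wakeup cannot
 * be lost.  Note that both sides must pass the same hint.
 */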

int
sys__lwp_unpark(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	lwpid_t target;
	wchan_t wchan;
	int swapin;

	p = l->l_proc;
	target = SCARG(uap, target);

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (t == NULL) {
		/*
		 * The LWP hasn't parked yet.  Take the hit
		 * and mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			return ESRCH;
		}
		lwp_lock(t);
		mutex_exit(&p->p_smutex);

		if (t->l_sleepq == sq) {
			/*
			 * We have raced, and the LWP is now parked.
			 * Wake it in the usual way.
			 */
			KASSERT(t->l_syncobj == &lwp_park_sobj);
			LOCK_ASSERT(lwp_locked(t, sq->sq_mutex));
			LWP_COUNT(lwp_ev_park_raced, 1);
		} else {
			/*
			 * It may not have parked yet, or it may be
			 * parked on a different user sync object.
			 * The latter is an application error.
			 */
			t->l_flag |= L_CANCELLED;
			lwp_unlock(t);
			return 0;
		}
	}

	swapin = sleepq_remove(sq, t);
	sleepq_unlock(sq);
	if (swapin)
		wakeup(&proc0);
	LWP_COUNT(lwp_ev_park_targ, 1);
	return 0;
}

int
sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_all_args /* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	u_int ntargets, unparked;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else if ((tp = kmem_alloc(sz, KM_SLEEP)) == NULL)
		return ENOMEM;
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets)
			kmem_free(tp, sz);
		return error;
	}

	unparked = 0;
	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			unparked++;
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			sleepq_lock(sq);
			continue;
		}
		lwp_lock(t);
		mutex_exit(&p->p_smutex);

		if (t->l_sleepq == sq) {
			/*
			 * We have raced, and the LWP is now parked.
			 * Wake it in the usual way.
			 */
			KASSERT(t->l_syncobj == &lwp_park_sobj);
			LOCK_ASSERT(lwp_locked(t, sq->sq_mutex));
			LWP_COUNT(lwp_ev_park_raced, 1);
			swapin |= sleepq_remove(sq, t);
			unparked++;
		} else {
			/*
			 * It may not have parked yet, or it may be
			 * parked on a different user sync object.
			 * The latter is an application error.
			 */
			t->l_flag |= L_CANCELLED;
			lwp_unlock(t);
			sleepq_lock(sq);
		}
	}

	sleepq_unlock(sq);
	if (tp != targets)
		kmem_free(tp, sz);
	if (swapin)
		wakeup(&proc0);
	LWP_COUNT(lwp_ev_park_bcast, unparked);
	LWP_COUNT(lwp_ev_park_miss, (ntargets - unparked));
	/* XXXAD return unparked; */
	return 0;
}
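
/*
 * An illustrative caller-side loop for waking many waiters at once,
 * e.g. a condition variable broadcast in a thread library.  Sketch
 * only: gather_waiters() and the cv object are hypothetical.
 *
 *	lwpid_t batch[64];
 *	ssize_t max, n;
 *
 *	max = _lwp_unpark_all(NULL, 0, NULL);
 *	if (max > 64)
 *		max = 64;
 *	while ((n = gather_waiters(cv, batch, max)) > 0)
 *		_lwp_unpark_all(batch, n, cv);
 *
 * The initial call with a NULL target array is the probe implemented
 * above: it returns LWP_UNPARK_MAX so the caller can size its batches.
 */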