/*	$NetBSD: sys_lwp.c,v 1.7 2007/02/26 09:20:54 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.7 2007/02/26 09:20:54 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>

#include <uvm/uvm_extern.h>

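/*
 * Upper bound on the number of LWPs that one _lwp_unpark_all() call
 * will unpark; callers with more targets are expected to loop in
 * blocks (see sys__lwp_unpark_all() below).
 */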
#define	LWP_UNPARK_MAX		1024

syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

sleeptab_t lwp_park_tab;

#ifdef LWP_COUNTERS
struct evcnt lwp_ev_park_early = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "unparked early");
struct evcnt lwp_ev_park_raced = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "raced");
struct evcnt lwp_ev_park_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "not parked");
struct evcnt lwp_ev_park_bcast = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "broadcast unpark");
struct evcnt lwp_ev_park_targ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "targeted unpark");
struct evcnt lwp_ev_park = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "_lwp_park", "parked");

#define	LWP_COUNT(ev, val)	(ev).ev_count += (val)	/* XXXSMP */
#else
#define	LWP_COUNT(ev, val)	/* nothing */
#endif

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
#ifdef LWP_COUNTERS
	evcnt_attach_static(&lwp_ev_park_early);
	evcnt_attach_static(&lwp_ev_park_raced);
	evcnt_attach_static(&lwp_ev_park_miss);
	evcnt_attach_static(&lwp_ev_park_bcast);
	evcnt_attach_static(&lwp_ev_park_targ);
	evcnt_attach_static(&lwp_ev_park);
#endif
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_create_args /* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	newlwp(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2);

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	lid = l2->l_lid;
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			LOCK_ASSERT(lwp_locked(l2, &sched_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			setrunqueue(l2);
		}
	} else
		l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error)
		return error;

	return 0;
}
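
/*
 * Illustrative userland usage of _lwp_create() (a sketch only, not part
 * of this file; error handling is omitted, and the entry/stack variables
 * are assumptions):
 *
 *	ucontext_t uc;
 *	lwpid_t lid;
 *
 *	getcontext(&uc);
 *	_lwp_makecontext(&uc, entry, arg, NULL, stack, stacksize);
 *	_lwp_create(&uc, 0, &lid);
 */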

int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

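/*
 * _lwp_getprivate() and _lwp_setprivate() maintain an opaque per-LWP
 * pointer (l_private) for userland; a thread library will typically
 * store the current thread's self pointer here.
 */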
int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_setprivate_args /* {
		syscallarg(void *) ptr;
	} */ *uap = v;

	l->l_private = SCARG(uap, ptr);
	return 0;
}

int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_suspend_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(&p->p_smutex);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_continue_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(&p->p_smutex);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wakeup_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(&p->p_smutex);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}

	lwp_lock(t);

	if (t->l_stat != LSSLEEP) {
		error = ENODEV;
		goto bad;
	}

	if ((t->l_flag & LW_SINTR) == 0) {
		error = EBUSY;
		goto bad;
	}

	/* Wake it up.  setrunnable() will release the LWP lock. */
	t->l_flag |= LW_CANCELLED;
	setrunnable(t);
	mutex_exit(&p->p_smutex);
	return 0;

bad:
	lwp_unlock(t);
	mutex_exit(&p->p_smutex);
	return error;
}

int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_wait_args /* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(&p->p_smutex);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}

/* ARGSUSED */
int
sys__lwp_kill(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_kill_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_USER;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(&proclist_mutex);
	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(&p->p_smutex);
	mutex_exit(&proclist_mutex);

	return error;
}

int
sys__lwp_detach(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_detach_args /* {
		syscallarg(lwpid_t) target;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(&p->p_smutex);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				cv_broadcast(&p->p_lwpcv);
				lwp_free(t, 0, 0);	/* releases proc mutex */
				return 0;
			}
			error = 0;
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	cv_broadcast(&p->p_lwpcv);
	mutex_exit(&p->p_smutex);

	return error;
}

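/*
 * Compute the wait channel for a park operation.  The user-supplied
 * hint (typically the address of the sync object being waited on) is
 * XORed with the proc pointer, so that identical hints used by
 * different processes hash to different channels in the shared
 * sleep table.
 */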
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{
	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys__lwp_park(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_park_args /* {
		syscallarg(const struct timespec *) ts;
		syscallarg(ucontext_t *) uc;
		syscallarg(const void *) hint;
	} */ *uap = v;
	const struct timespec *tsp;
	struct timespec ts, tsx;
	struct timeval tv;
	sleepq_t *sq;
	wchan_t wchan;
	int timo, error;

	/* Fix up the given timeout value. */
	if ((tsp = SCARG(uap, ts)) != NULL) {
		if ((error = copyin(tsp, &ts, sizeof(ts))) != 0)
			return error;
		getnanotime(&tsx);
		timespecsub(&ts, &tsx, &ts);
		tv.tv_sec = ts.tv_sec;
		tv.tv_usec = ts.tv_nsec / 1000;
		if (tv.tv_sec < 0 || (tv.tv_sec == 0 && tv.tv_usec < 0))
			return ETIMEDOUT;
		if ((error = itimerfix(&tv)) != 0)
			return error;
		timo = tvtohz(&tv);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	wchan = lwp_park_wchan(l->l_proc, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	if ((l->l_flag & LW_CANCELLED) != 0) {
		sleepq_lwp_lock(l);
		l->l_flag &= ~LW_CANCELLED;
		sleepq_lwp_unlock(l);
		sleepq_unlock(sq);
		LWP_COUNT(lwp_ev_park_early, 1);
		return EALREADY;
	}

	/*
	 * For now we ignore the ucontext argument.  In the future, we may
	 * put our stack up to be recycled.  If it's binned, a trampoline
	 * function could call sleepq_unblock() on our behalf.
	 */
	LWP_COUNT(lwp_ev_park, 1);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), wchan, "parked", timo, 1,
	    &lwp_park_sobj);
	error = sleepq_unblock(timo, 1);
	return error == EWOULDBLOCK ? ETIMEDOUT : error;
}
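
/*
 * Illustrative userland usage (a sketch only, not part of this file):
 * a waiter parks on a user-level lock, and the releaser unparks it.
 * The "lock", "self", "lock_available" and "enqueue_waiter" names are
 * assumptions; EALREADY means a pending unpark was consumed before
 * sleeping.
 *
 *	enqueue_waiter(lock, self);
 *	while (!lock_available(lock)) {
 *		if (_lwp_park(NULL, NULL, lock) != 0 && errno != EALREADY)
 *			err(1, "_lwp_park");
 *	}
 *
 * The releaser, having dequeued the waiter's LWP ID:
 *
 *	_lwp_unpark(waiter, lock);
 */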

int
sys__lwp_unpark(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_args /* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	lwpid_t target;
	wchan_t wchan;
	int swapin;

	p = l->l_proc;
	target = SCARG(uap, target);

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (t == NULL) {
		/*
		 * The LWP hasn't parked yet.  Take the hit
		 * and mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			return ESRCH;
		}
		lwp_lock(t);
		mutex_exit(&p->p_smutex);

		if (t->l_sleepq == sq) {
			/*
			 * We have raced, and the LWP is now parked.
			 * Wake it in the usual way.
			 */
			KASSERT(t->l_syncobj == &lwp_park_sobj);
			LOCK_ASSERT(lwp_locked(t, sq->sq_mutex));
			LWP_COUNT(lwp_ev_park_raced, 1);
		} else {
			/*
			 * It may not have parked yet, or it may be
			 * parked on a different user sync object.
			 * The latter is an application error.
			 */
			t->l_flag |= LW_CANCELLED;
			lwp_unlock(t);
			return 0;
		}
	}

	swapin = sleepq_remove(sq, t);
	sleepq_unlock(sq);
	if (swapin)
		uvm_kick_scheduler();
	LWP_COUNT(lwp_ev_park_targ, 1);
	return 0;
}

int
sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
{
	struct sys__lwp_unpark_all_args /* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */ *uap = v;
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	u_int ntargets, unparked;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		tp = kmem_alloc(sz, KM_SLEEP);
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			KERNEL_LOCK(1, l);	/* XXXSMP */
			kmem_free(tp, sz);
			KERNEL_UNLOCK_ONE(l);	/* XXXSMP */
		}
		return error;
	}

	unparked = 0;
	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, &sq->sq_queue, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			unparked++;
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		sleepq_unlock(sq);
		mutex_enter(&p->p_smutex);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(&p->p_smutex);
			sleepq_lock(sq);
			continue;
		}
		lwp_lock(t);
		mutex_exit(&p->p_smutex);

		if (t->l_sleepq == sq) {
			/*
			 * We have raced, and the LWP is now parked.
			 * Wake it in the usual way.
			 */
			KASSERT(t->l_syncobj == &lwp_park_sobj);
			LOCK_ASSERT(lwp_locked(t, sq->sq_mutex));
			LWP_COUNT(lwp_ev_park_raced, 1);
			swapin |= sleepq_remove(sq, t);
			unparked++;
		} else {
			/*
			 * It may not have parked yet, or it may be
			 * parked on a different user sync object.
			 * The latter is an application error.
			 */
			t->l_flag |= LW_CANCELLED;
			lwp_unlock(t);
			sleepq_lock(sq);
		}
	}

	sleepq_unlock(sq);
	if (tp != targets) {
		KERNEL_LOCK(1, l);		/* XXXSMP */
		kmem_free(tp, sz);
		KERNEL_UNLOCK_ONE(l);		/* XXXSMP */
	}
	if (swapin)
		uvm_kick_scheduler();
	LWP_COUNT(lwp_ev_park_bcast, unparked);
	LWP_COUNT(lwp_ev_park_miss, (ntargets - unparked));
	/* XXXAD return unparked; */
	return 0;
}

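/*
 * Illustrative userland usage (a sketch only, not part of this file):
 * broadcast wakeup in blocks, using the size query above.  The
 * "waiters", "nwaiters" and "hint" names are assumptions.
 *
 *	ssize_t max = _lwp_unpark_all(NULL, 0, NULL);
 *	size_t i, n;
 *
 *	for (i = 0; i < nwaiters; i += n) {
 *		n = MIN((size_t)max, nwaiters - i);
 *		(void)_lwp_unpark_all(waiters + i, n, hint);
 *	}
 */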