rump.c revision 1.64 1 /* $NetBSD: rump.c,v 1.64 2008/10/10 13:14:41 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/atomic.h>
32 #include <sys/cpu.h>
33 #include <sys/filedesc.h>
34 #include <sys/kauth.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/mount.h>
38 #include <sys/namei.h>
39 #include <sys/queue.h>
40 #include <sys/resourcevar.h>
41 #include <sys/select.h>
42 #include <sys/vnode.h>
43 #include <sys/vfs_syscalls.h>
44 #include <sys/wapbl.h>
45 #include <sys/sysctl.h>
46
47 #include <miscfs/specfs/specdev.h>
48
49 #include <rump/rumpuser.h>
50
51 #include "rump_private.h"
52
/* Faked-up "process 0" state shared by everything running inside rump. */
struct proc proc0;
struct cwdinfo rump_cwdi;	/* cwd info for proc0 */
struct pstats rump_stats;	/* dummy stats, shared by all rump procs */
struct plimit rump_limits;	/* limits; FSIZE/NOFILE set to infinity in init */
struct cpu_info rump_cpu;	/* the single virtual cpu */
struct filedesc rump_filedesc0;	/* fd table for proc0 */
struct proclist allproc;
char machine[] = "rump";
static kauth_cred_t rump_susercred;	/* shared superuser cred, see rump_cred_suserget() */

/* biglock: initialized as a recursive mutex in _rump_init() */
kmutex_t rump_giantlock;

sigset_t sigcantmask;

/* RUMP_WITHOUT_THREADS compiles out kernel thread support entirely */
#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

/* registry entry: a host file masquerading as a block device */
struct fakeblk {
	char path[MAXPATHLEN];		/* canonical (realpath'd) host path */
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
79
80 static void
81 rump_aiodone_worker(struct work *wk, void *dummy)
82 {
83 struct buf *bp = (struct buf *)wk;
84
85 KASSERT(&bp->b_work == wk);
86 bp->b_iodone(bp);
87 }
88
static int rump_inited;		/* one-shot guard for _rump_init() */
static struct emul emul_rump;	/* dummy emulation record for proc0 */

/*
 * Bootstrap the rump kernel: check library/kernel version agreement,
 * read tunables from the host environment, initialize subsystems in
 * dependency order and fake up proc0/lwp0 state.
 *
 * Returns 0 on success (including repeated calls, which are no-ops),
 * EPROGMISMATCH on version skew.
 */
int
_rump_init(int rump_version)
{
	extern char hostname[];
	extern size_t hostnamelen;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return 0;
	rump_inited = 1;

	if (rump_version != RUMP_VERSION) {
		printf("rump version mismatch, %d vs. %d\n",
		    rump_version, RUMP_VERSION);
		return EPROGMISMATCH;
	}

	/* tunables from the host environment */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}
	if (rumpuser_getenv("RUMP_THREADS", buf, sizeof(buf), &error) == 0) {
		rump_threads = *buf != '0';
	}

	/* giantlock must be recursive, unlike a normal kmutex */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	kauth_init();
	rump_susercred = rump_cred_create(0, 0, 0, NULL);

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);

	/* fake up process 0 / lwp 0 */
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = 1;
	LIST_INSERT_HEAD(&allproc, p, p_list);

	/* no artificial resource limits inside the rump kernel */
	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();

	/* subsystem init, in dependency order */
	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();
	softint_init(&rump_cpu);

	rumpvfs_init();

	/* aieeeedondest */
	if (rump_threads) {
		if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
		    rump_aiodone_worker, NULL, 0, 0, 0))
			panic("aiodoned");
	}

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	/* must come after fd_sys_init() above */
	lwp0.l_fd = proc0.p_fd = fd_init(&rump_filedesc0);
	/* NOTE(review): rootvnode is NULL until a root fs is mounted */
	rump_cwdi.cwdi_cdir = rootvnode;

	return 0;
}
185
186 struct mount *
187 rump_mnt_init(struct vfsops *vfsops, int mntflags)
188 {
189 struct mount *mp;
190
191 mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);
192
193 mp->mnt_op = vfsops;
194 mp->mnt_flag = mntflags;
195 TAILQ_INIT(&mp->mnt_vnodelist);
196 rw_init(&mp->mnt_unmounting);
197 mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
198 mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
199 mp->mnt_refcnt = 1;
200
201 mount_initspecific(mp);
202
203 return mp;
204 }
205
206 int
207 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
208 {
209 struct vnode *rvp;
210 int rv;
211
212 rv = VFS_MOUNT(mp, path, data, dlen);
213 if (rv)
214 return rv;
215
216 (void) VFS_STATVFS(mp, &mp->mnt_stat);
217 rv = VFS_START(mp, 0);
218 if (rv)
219 VFS_UNMOUNT(mp, MNT_FORCE);
220
221 /*
222 * XXX: set a root for lwp0. This is strictly not correct,
223 * but makes things works for single fs case without having
224 * to manually call rump_rcvp_set().
225 */
226 VFS_ROOT(mp, &rvp);
227 rump_rcvp_set(rvp, rvp);
228 vput(rvp);
229
230 return rv;
231 }
232
233 void
234 rump_mnt_destroy(struct mount *mp)
235 {
236
237 mount_finispecific(mp);
238 kmem_free(mp, sizeof(*mp));
239 }
240
241 struct componentname *
242 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
243 kauth_cred_t creds, struct lwp *l)
244 {
245 struct componentname *cnp;
246 const char *cp = NULL;
247
248 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
249
250 cnp->cn_nameiop = nameiop;
251 cnp->cn_flags = flags | HASBUF;
252
253 cnp->cn_pnbuf = PNBUF_GET();
254 strcpy(cnp->cn_pnbuf, name);
255 cnp->cn_nameptr = cnp->cn_pnbuf;
256 cnp->cn_namelen = namelen;
257 cnp->cn_hash = namei_hash(name, &cp);
258
259 cnp->cn_cred = creds;
260
261 return cnp;
262 }
263
/*
 * Release a componentname obtained from rump_makecn() (or filled in by
 * namei).  flags: RUMPCN_FREECRED additionally drops the credential;
 * RUMPCN_HASNTBUF means the VOP already consumed the pathname buffer;
 * RUMPCN_ISLOOKUP marks a LOOKUP-type operation.
 */
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		/*
		 * Mirror the kernel's SAVENAME/SAVESTART pathbuf ownership
		 * rules: with SAVENAME the buffer is ours to free only for
		 * lookups or when SAVESTART was requested.
		 */
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}
281
/* hey baby, what's your namei? */
/*
 * Perform a namei lookup and hand selected results back to the caller.
 * dvpp (parent, requires LOCKPARENT), vpp (leaf) and cnpp (a copy of
 * the componentname) are each optional; pass NULL to discard.  When
 * vpp is NULL the leaf vnode is released here according to LOCKLEAF.
 * Returns 0 or the namei error.
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		/* caller wants the parent: it must have asked namei to lock it */
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		/* caller doesn't want the leaf: drop our hold on it */
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		/* hand out a heap copy; caller frees with rump_freecn() */
		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody took ownership of the pathbuf -> it would leak */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
325
326 static struct fakeblk *
327 _rump_fakeblk_find(const char *path)
328 {
329 char buf[MAXPATHLEN];
330 struct fakeblk *fblk;
331 int error;
332
333 if (rumpuser_realpath(path, buf, &error) == NULL)
334 return NULL;
335
336 LIST_FOREACH(fblk, &fakeblks, entries)
337 if (strcmp(fblk->path, buf) == 0)
338 return fblk;
339
340 return NULL;
341 }
342
343 int
344 rump_fakeblk_register(const char *path)
345 {
346 char buf[MAXPATHLEN];
347 struct fakeblk *fblk;
348 int error;
349
350 if (_rump_fakeblk_find(path))
351 return EEXIST;
352
353 if (rumpuser_realpath(path, buf, &error) == NULL)
354 return error;
355
356 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
357 if (fblk == NULL)
358 return ENOMEM;
359
360 strlcpy(fblk->path, buf, MAXPATHLEN);
361 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
362
363 return 0;
364 }
365
366 int
367 rump_fakeblk_find(const char *path)
368 {
369
370 return _rump_fakeblk_find(path) != NULL;
371 }
372
373 void
374 rump_fakeblk_deregister(const char *path)
375 {
376 struct fakeblk *fblk;
377
378 fblk = _rump_fakeblk_find(path);
379 if (fblk == NULL)
380 return;
381
382 LIST_REMOVE(fblk, entries);
383 kmem_free(fblk, sizeof(*fblk));
384 }
385
386 void
387 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
388 {
389
390 *vtype = vp->v_type;
391 *vsize = vp->v_size;
392 if (vp->v_specnode)
393 *vdev = vp->v_rdev;
394 else
395 *vdev = 0;
396 }
397
398 struct vfsops *
399 rump_vfslist_iterate(struct vfsops *ops)
400 {
401
402 if (ops == NULL)
403 return LIST_FIRST(&vfs_list);
404 else
405 return LIST_NEXT(ops, vfs_list);
406 }
407
/* Thin wrapper: look up vfsops by file system name ("ffs", "nfs", ...). */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{
	struct vfsops *ops;

	ops = vfs_getopsbyname(name);
	return ops;
}
414
415 struct vattr*
416 rump_vattr_init()
417 {
418 struct vattr *vap;
419
420 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
421 vattr_null(vap);
422
423 return vap;
424 }
425
426 void
427 rump_vattr_settype(struct vattr *vap, enum vtype vt)
428 {
429
430 vap->va_type = vt;
431 }
432
433 void
434 rump_vattr_setmode(struct vattr *vap, mode_t mode)
435 {
436
437 vap->va_mode = mode;
438 }
439
440 void
441 rump_vattr_setrdev(struct vattr *vap, dev_t dev)
442 {
443
444 vap->va_rdev = dev;
445 }
446
447 void
448 rump_vattr_free(struct vattr *vap)
449 {
450
451 kmem_free(vap, sizeof(*vap));
452 }
453
454 void
455 rump_vp_incref(struct vnode *vp)
456 {
457
458 mutex_enter(&vp->v_interlock);
459 ++vp->v_usecount;
460 mutex_exit(&vp->v_interlock);
461 }
462
463 int
464 rump_vp_getref(struct vnode *vp)
465 {
466
467 return vp->v_usecount;
468 }
469
470 void
471 rump_vp_decref(struct vnode *vp)
472 {
473
474 mutex_enter(&vp->v_interlock);
475 --vp->v_usecount;
476 mutex_exit(&vp->v_interlock);
477 }
478
/*
 * Really really recycle with a cherry on top. We should be
 * extra-sure we can do this. For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/* force usecount to 1 so vclean/vrelel below will tear it down */
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out. This is very wrong, but fixing it properly would
	 * take too much effort for now
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	/* vclean/vrelel consume the interlock; no mutex_exit here */
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
503
/* Thin wrapper: release a vnode reference. */
void
rump_vp_rele(struct vnode *vp)
{
	vrele(vp);
}
510
511 struct uio *
512 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
513 {
514 struct uio *uio;
515 enum uio_rw uiorw;
516
517 switch (rw) {
518 case RUMPUIO_READ:
519 uiorw = UIO_READ;
520 break;
521 case RUMPUIO_WRITE:
522 uiorw = UIO_WRITE;
523 break;
524 default:
525 panic("%s: invalid rw %d", __func__, rw);
526 }
527
528 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
529 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
530
531 uio->uio_iov->iov_base = buf;
532 uio->uio_iov->iov_len = bufsize;
533
534 uio->uio_iovcnt = 1;
535 uio->uio_offset = offset;
536 uio->uio_resid = bufsize;
537 uio->uio_rw = uiorw;
538 uio->uio_vmspace = UIO_VMSPACE_SYS;
539
540 return uio;
541 }
542
543 size_t
544 rump_uio_getresid(struct uio *uio)
545 {
546
547 return uio->uio_resid;
548 }
549
550 off_t
551 rump_uio_getoff(struct uio *uio)
552 {
553
554 return uio->uio_offset;
555 }
556
557 size_t
558 rump_uio_free(struct uio *uio)
559 {
560 size_t resid;
561
562 resid = uio->uio_resid;
563 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
564 kmem_free(uio, sizeof(*uio));
565
566 return resid;
567 }
568
569 void
570 rump_vp_lock_exclusive(struct vnode *vp)
571 {
572
573 /* we can skip vn_lock() */
574 VOP_LOCK(vp, LK_EXCLUSIVE);
575 }
576
577 void
578 rump_vp_lock_shared(struct vnode *vp)
579 {
580
581 VOP_LOCK(vp, LK_SHARED);
582 }
583
/* Release the vnode lock. */
void
rump_vp_unlock(struct vnode *vp)
{
	VOP_UNLOCK(vp, 0);
}
590
/* Query the vnode lock state. */
int
rump_vp_islocked(struct vnode *vp)
{
	int locked = VOP_ISLOCKED(vp);

	return locked;
}
597
598 void
599 rump_vp_interlock(struct vnode *vp)
600 {
601
602 mutex_enter(&vp->v_interlock);
603 }
604
/* Thin wrapper around VFS_UNMOUNT. */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{
	return VFS_UNMOUNT(mp, mntflags);
}
611
/*
 * Fetch the root vnode of mp.  VFS_ROOT returns it locked; unlock it
 * before returning unless the caller asked for a locked vnode.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error = VFS_ROOT(mp, vpp);

	if (error != 0)
		return error;
	if (lock == 0)
		VOP_UNLOCK(*vpp, 0);
	return 0;
}
626
/* Thin wrapper around VFS_STATVFS. */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	return VFS_STATVFS(mp, sbp);
}
633
634 int
635 rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
636 {
637
638 return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
639 }
640
/* Thin wrapper: file handle -> vnode. */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{
	return VFS_FHTOVP(mp, fid, vpp);
}
647
648 int
649 rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
650 {
651
652 return VFS_VPTOFH(vp, fid, fidsize);
653 }
654
/*
 * Wait for all buffers to be flushed; complain about stragglers.
 * mp is currently unused.
 */
/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced = buf_syncwait();

	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
665
666 int
667 rump_vfs_load(struct modinfo **mi)
668 {
669
670 if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
671 return EPROGMISMATCH;
672
673 return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
674 }
675
676 void
677 rump_bioops_sync()
678 {
679
680 if (bioopsp)
681 bioopsp->io_sync(NULL);
682 }
683
/*
 * Create an lwp (and, for pid != 0, a matching proc with its own cwd
 * and fd table) and optionally install it as the host thread's curlwp.
 * pid == 0 attaches the new lwp to proc0 instead.  The returned lwp
 * (and its proc, for pid != 0) is owned by the caller and torn down
 * by rump_clear_curlwp().
 */
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	if (pid != 0) {
		p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
		p->p_cwdi = cwdinit();

		/* stats/limits/vmspace are shared with proc0 */
		p->p_stats = &rump_stats;
		p->p_limit = &rump_limits;
		p->p_pid = pid;
		p->p_vmspace = &rump_vmspace;
		p->p_fd = fd_init(NULL);	/* fresh fd table */
	} else {
		p = &proc0;
	}

	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = lid;
	l->l_fd = p->p_fd;
	l->l_mutex = RUMP_LMUTEX_MAGIC;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}
715
716 void
717 rump_clear_curlwp()
718 {
719 struct lwp *l;
720
721 l = rumpuser_get_curlwp();
722 if (l->l_proc->p_pid != 0) {
723 fd_free();
724 cwdfree(l->l_proc->p_cwdi);
725 rump_cred_destroy(l->l_cred);
726 kmem_free(l->l_proc, sizeof(*l->l_proc));
727 }
728 kmem_free(l, sizeof(*l));
729 rumpuser_set_curlwp(NULL);
730 }
731
732 struct lwp *
733 rump_get_curlwp()
734 {
735 struct lwp *l;
736
737 l = rumpuser_get_curlwp();
738 if (l == NULL)
739 l = &lwp0;
740
741 return l;
742 }
743
744 int
745 rump_splfoo()
746 {
747
748 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
749 rumpuser_rw_enter(&rumpspl, 0);
750 rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
751 }
752
753 return 0;
754 }
755
756 static void
757 rump_intr_enter(void)
758 {
759
760 rumpuser_set_ipl(RUMPUSER_IPL_INTR);
761 rumpuser_rw_enter(&rumpspl, 1);
762 }
763
764 static void
765 rump_intr_exit(void)
766 {
767
768 rumpuser_rw_exit(&rumpspl);
769 rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
770 }
771
772 void
773 rump_splx(int dummy)
774 {
775
776 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
777 rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
778 rumpuser_rw_exit(&rumpspl);
779 }
780 }
781
/*
 * I/O completion callback invoked by the rumpuser layer: record how
 * much of the request completed and run biodone() inside faked
 * interrupt context, as a real driver would.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	/* NOTE(review): assert is vacuous if b_resid is unsigned -- verify */
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
795
796 kauth_cred_t
797 rump_cred_create(uid_t uid, gid_t gid, size_t ngroups, gid_t *groups)
798 {
799 kauth_cred_t cred;
800 int rv;
801
802 cred = kauth_cred_alloc();
803 kauth_cred_setuid(cred, uid);
804 kauth_cred_seteuid(cred, uid);
805 kauth_cred_setsvuid(cred, uid);
806 kauth_cred_setgid(cred, gid);
807 kauth_cred_setgid(cred, gid);
808 kauth_cred_setegid(cred, gid);
809 kauth_cred_setsvgid(cred, gid);
810 rv = kauth_cred_setgroups(cred, groups, ngroups, 0, UIO_SYSSPACE);
811 /* oh this is silly. and by "this" I mean kauth_cred_setgroups() */
812 assert(rv == 0);
813
814 return cred;
815 }
816
817 void
818 rump_cred_destroy(kauth_cred_t cred)
819 {
820
821 kauth_cred_free(cred);
822 }
823
824 kauth_cred_t
825 rump_cred_suserget()
826 {
827
828 kauth_cred_hold(rump_susercred);
829 return rump_susercred;
830 }
831
832 /* XXX: if they overflow, we're screwed */
833 lwpid_t
834 rump_nextlid()
835 {
836 static unsigned lwpid = 2;
837
838 do {
839 lwpid = atomic_inc_uint_nv(&lwpid);
840 } while (lwpid == 0);
841
842 return (lwpid_t)lwpid;
843 }
844
845 int _syspuffs_stub(int, int *);
846 int
847 _syspuffs_stub(int fd, int *newfd)
848 {
849
850 return ENODEV;
851 }
852
853 __weak_alias(syspuffs_glueinit,_syspuffs_stub);
854