rump.c revision 1.65 1 /* $NetBSD: rump.c,v 1.65 2008/10/10 20:24:10 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/atomic.h>
32 #include <sys/cpu.h>
33 #include <sys/filedesc.h>
34 #include <sys/kauth.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/mount.h>
38 #include <sys/namei.h>
39 #include <sys/percpu.h>
40 #include <sys/queue.h>
41 #include <sys/resourcevar.h>
42 #include <sys/select.h>
43 #include <sys/vnode.h>
44 #include <sys/vfs_syscalls.h>
45 #include <sys/wapbl.h>
46 #include <sys/sysctl.h>
47
48 #include <miscfs/specfs/specdev.h>
49
50 #include <rump/rumpuser.h>
51
52 #include "rump_private.h"
53
/*
 * Process 0 and the per-process substructures the rump kernel
 * fakes up for it, plus other module-global state.
 */
struct proc proc0;
struct cwdinfo rump_cwdi;		/* cwd info shared by rump processes */
struct pstats rump_stats;		/* shared stats structure */
struct plimit rump_limits;		/* resource limits, set in _rump_init() */
struct cpu_info rump_cpu;		/* the single virtual cpu */
struct filedesc rump_filedesc0;		/* descriptor table for proc0 */
struct proclist allproc;
char machine[] = "rump";
static kauth_cred_t rump_susercred;	/* superuser cred, made in _rump_init() */

/* "giant" lock; initialized recursive in _rump_init() */
kmutex_t rump_giantlock;

sigset_t sigcantmask;

/* thread support may be compiled out; also tunable via RUMP_THREADS env */
#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

/*
 * Registration record for a host file posing as a block device;
 * path is the canonical (realpath'd) host path.
 */
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
80
/*
 * Worker for the aiodone workqueue: bufs are queued via their
 * embedded b_work member, so convert back to the buf and invoke
 * its iodone hook.
 */
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
89
static int rump_inited;		/* has _rump_init() already run? */
static struct emul emul_rump;	/* dummy emulation for rump processes */
92
/*
 * Bootstrap the rump kernel: initialize the vm, vfs and process
 * subsystems and fake up proc0/lwp0 enough for file system code to
 * run.  The initialization order below matters; do not reorder
 * casually.
 *
 * Returns 0 on success (including repeated calls, which are no-ops)
 * or EPROGMISMATCH if the caller was built against a different
 * RUMP_VERSION.
 */
int
_rump_init(int rump_version)
{
	extern char hostname[];
	extern size_t hostnamelen;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return 0;
	rump_inited = 1;

	if (rump_version != RUMP_VERSION) {
		printf("rump version mismatch, %d vs. %d\n",
		    rump_version, RUMP_VERSION);
		return EPROGMISMATCH;
	}

	/* let the host environment tune the vnode cache and threading */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}
	if (rumpuser_getenv("RUMP_THREADS", buf, sizeof(buf), &error) == 0) {
		rump_threads = *buf != '0';
	}

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	kauth_init();
	rump_susercred = rump_cred_create(0, 0, 0, NULL);

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);

	/* wire up proc0/lwp0 with the faked per-process structures */
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = 1;
	LIST_INSERT_HEAD(&allproc, p, p_list);

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();

	/* kernel subsystem initialization */
	percpu_init();
	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();
	softint_init(&rump_cpu);

	rumpvfs_init();

	/* aieeeedondest */
	if (rump_threads) {
		if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
		    rump_aiodone_worker, NULL, 0, 0, 0))
			panic("aiodoned");
	}

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	/* NOTE(review): fd_init() presumably needs fd_sys_init() above — confirm */
	lwp0.l_fd = proc0.p_fd = fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;

	return 0;
}
187
188 struct mount *
189 rump_mnt_init(struct vfsops *vfsops, int mntflags)
190 {
191 struct mount *mp;
192
193 mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);
194
195 mp->mnt_op = vfsops;
196 mp->mnt_flag = mntflags;
197 TAILQ_INIT(&mp->mnt_vnodelist);
198 rw_init(&mp->mnt_unmounting);
199 mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
200 mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
201 mp->mnt_refcnt = 1;
202
203 mount_initspecific(mp);
204
205 return mp;
206 }
207
208 int
209 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
210 {
211 struct vnode *rvp;
212 int rv;
213
214 rv = VFS_MOUNT(mp, path, data, dlen);
215 if (rv)
216 return rv;
217
218 (void) VFS_STATVFS(mp, &mp->mnt_stat);
219 rv = VFS_START(mp, 0);
220 if (rv)
221 VFS_UNMOUNT(mp, MNT_FORCE);
222
223 /*
224 * XXX: set a root for lwp0. This is strictly not correct,
225 * but makes things works for single fs case without having
226 * to manually call rump_rcvp_set().
227 */
228 VFS_ROOT(mp, &rvp);
229 rump_rcvp_set(rvp, rvp);
230 vput(rvp);
231
232 return rv;
233 }
234
/*
 * Counterpart of rump_mnt_init(): release mount-specific data and
 * free the mount structure itself.
 */
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}
242
243 struct componentname *
244 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
245 kauth_cred_t creds, struct lwp *l)
246 {
247 struct componentname *cnp;
248 const char *cp = NULL;
249
250 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
251
252 cnp->cn_nameiop = nameiop;
253 cnp->cn_flags = flags | HASBUF;
254
255 cnp->cn_pnbuf = PNBUF_GET();
256 strcpy(cnp->cn_pnbuf, name);
257 cnp->cn_nameptr = cnp->cn_pnbuf;
258 cnp->cn_namelen = namelen;
259 cnp->cn_hash = namei_hash(name, &cp);
260
261 cnp->cn_cred = creds;
262
263 return cnp;
264 }
265
266 void
267 rump_freecn(struct componentname *cnp, int flags)
268 {
269
270 if (flags & RUMPCN_FREECRED)
271 rump_cred_destroy(cnp->cn_cred);
272
273 if ((flags & RUMPCN_HASNTBUF) == 0) {
274 if (cnp->cn_flags & SAVENAME) {
275 if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
276 PNBUF_PUT(cnp->cn_pnbuf);
277 } else {
278 PNBUF_PUT(cnp->cn_pnbuf);
279 }
280 }
281 kmem_free(cnp, sizeof(*cnp));
282 }
283
/* hey baby, what's your namei? */
/*
 * Look up a pathname with namei() and selectively hand back the
 * results.  dvpp receives the parent vnode (caller must then have
 * set LOCKPARENT in flags), vpp the result vnode, and cnpp a copy
 * of the final componentname; each may be NULL if not wanted, in
 * which case an unwanted result vnode is released here.
 *
 * Returns 0 on success or the error from namei().
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			/* caller doesn't want it; lock state depends on flags */
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody took ownership of the pathname buffer */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
327
328 static struct fakeblk *
329 _rump_fakeblk_find(const char *path)
330 {
331 char buf[MAXPATHLEN];
332 struct fakeblk *fblk;
333 int error;
334
335 if (rumpuser_realpath(path, buf, &error) == NULL)
336 return NULL;
337
338 LIST_FOREACH(fblk, &fakeblks, entries)
339 if (strcmp(fblk->path, buf) == 0)
340 return fblk;
341
342 return NULL;
343 }
344
345 int
346 rump_fakeblk_register(const char *path)
347 {
348 char buf[MAXPATHLEN];
349 struct fakeblk *fblk;
350 int error;
351
352 if (_rump_fakeblk_find(path))
353 return EEXIST;
354
355 if (rumpuser_realpath(path, buf, &error) == NULL)
356 return error;
357
358 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
359 if (fblk == NULL)
360 return ENOMEM;
361
362 strlcpy(fblk->path, buf, MAXPATHLEN);
363 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
364
365 return 0;
366 }
367
/* Check whether path is registered as a fake block device. */
int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}
374
375 void
376 rump_fakeblk_deregister(const char *path)
377 {
378 struct fakeblk *fblk;
379
380 fblk = _rump_fakeblk_find(path);
381 if (fblk == NULL)
382 return;
383
384 LIST_REMOVE(fblk, entries);
385 kmem_free(fblk, sizeof(*fblk));
386 }
387
388 void
389 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
390 {
391
392 *vtype = vp->v_type;
393 *vsize = vp->v_size;
394 if (vp->v_specnode)
395 *vdev = vp->v_rdev;
396 else
397 *vdev = 0;
398 }
399
/*
 * Iterate over the list of linked-in file system types: pass NULL
 * for the first entry, the previous return value for the next one.
 * Returns NULL at the end of the list.
 */
struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

/* Thin wrapper around vfs_getopsbyname() for rump clients. */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}
416
417 struct vattr*
418 rump_vattr_init()
419 {
420 struct vattr *vap;
421
422 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
423 vattr_null(vap);
424
425 return vap;
426 }
427
/* Set the file type attribute. */
void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

/* Set the file mode (permission bits) attribute. */
void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

/* Set the device number attribute for device special files. */
void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

/* Release a vattr obtained from rump_vattr_init(). */
void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}
455
/* Manually bump a vnode's use count under the vnode interlock. */
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Return a vnode's current use count.  NOTE(review): read without
 * the interlock, so the value is only advisory.
 */
int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

/* Drop a reference taken with rump_vp_incref(). */
void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
480
/*
 * Really really recycle with a cherry on top. We should be
 * extra-sure we can do this. For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/*
	 * NOTE(review): usecount is forced to 1 so that the vrelel()
	 * below drops the final reference — confirm against vfs_subr.
	 */
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out. This is very wrong, but fixing it properly would
	 * take too much effort for now
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
505
/* vrele() wrapper exported to rump clients. */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
512
513 struct uio *
514 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
515 {
516 struct uio *uio;
517 enum uio_rw uiorw;
518
519 switch (rw) {
520 case RUMPUIO_READ:
521 uiorw = UIO_READ;
522 break;
523 case RUMPUIO_WRITE:
524 uiorw = UIO_WRITE;
525 break;
526 default:
527 panic("%s: invalid rw %d", __func__, rw);
528 }
529
530 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
531 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
532
533 uio->uio_iov->iov_base = buf;
534 uio->uio_iov->iov_len = bufsize;
535
536 uio->uio_iovcnt = 1;
537 uio->uio_offset = offset;
538 uio->uio_resid = bufsize;
539 uio->uio_rw = uiorw;
540 uio->uio_vmspace = UIO_VMSPACE_SYS;
541
542 return uio;
543 }
544
/* Return the number of bytes not yet transferred for this uio. */
size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

/* Return the current offset of this uio. */
off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}
558
559 size_t
560 rump_uio_free(struct uio *uio)
561 {
562 size_t resid;
563
564 resid = uio->uio_resid;
565 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
566 kmem_free(uio, sizeof(*uio));
567
568 return resid;
569 }
570
/* Take an exclusive vnode lock. */
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

/* Take a shared vnode lock. */
void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

/* Release a vnode lock taken with one of the routines above. */
void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

/* Return the VOP_ISLOCKED() status of a vnode. */
int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

/* Acquire the vnode interlock. */
void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}
606
/* Unmount a file system; mntflags is passed through to VFS_UNMOUNT(). */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}
613
/*
 * Fetch the root vnode of mp.  VFS_ROOT() hands it back locked; if
 * the caller did not ask for a lock, drop it but keep the reference.
 * Returns 0 on success or the VFS_ROOT() error.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error;

	error = VFS_ROOT(mp, vpp);
	if (error != 0)
		return error;

	if (lock == 0)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}
628
/* Fetch file system statistics into sbp. */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

/* Sync a file system; nonzero wait requests a synchronous sync. */
int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

/* Translate a file handle to a vnode. */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

/* Translate a vnode to a file handle. */
int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}
656
/*ARGSUSED*/
/*
 * Wait for dirty buffers to be flushed and complain about any that
 * remain.  The mount argument is unused.
 */
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced;

	unsynced = buf_syncwait();
	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
667
/*
 * Load a vfs module: check version compatibility, then run its
 * MODULE_CMD_INIT command.  Returns EPROGMISMATCH for incompatible
 * modules, otherwise the modcmd result.
 */
int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}
677
678 void
679 rump_bioops_sync()
680 {
681
682 if (bioopsp)
683 bioopsp->io_sync(NULL);
684 }
685
686 struct lwp *
687 rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
688 {
689 struct lwp *l;
690 struct proc *p;
691
692 l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
693 if (pid != 0) {
694 p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
695 p->p_cwdi = cwdinit();
696
697 p->p_stats = &rump_stats;
698 p->p_limit = &rump_limits;
699 p->p_pid = pid;
700 p->p_vmspace = &rump_vmspace;
701 p->p_fd = fd_init(NULL);
702 } else {
703 p = &proc0;
704 }
705
706 l->l_cred = rump_cred_suserget();
707 l->l_proc = p;
708 l->l_lid = lid;
709 l->l_fd = p->p_fd;
710 l->l_mutex = RUMP_LMUTEX_MAGIC;
711
712 if (set)
713 rumpuser_set_curlwp(l);
714
715 return l;
716 }
717
718 void
719 rump_clear_curlwp()
720 {
721 struct lwp *l;
722
723 l = rumpuser_get_curlwp();
724 if (l->l_proc->p_pid != 0) {
725 fd_free();
726 cwdfree(l->l_proc->p_cwdi);
727 rump_cred_destroy(l->l_cred);
728 kmem_free(l->l_proc, sizeof(*l->l_proc));
729 }
730 kmem_free(l, sizeof(*l));
731 rumpuser_set_curlwp(NULL);
732 }
733
734 struct lwp *
735 rump_get_curlwp()
736 {
737 struct lwp *l;
738
739 l = rumpuser_get_curlwp();
740 if (l == NULL)
741 l = &lwp0;
742
743 return l;
744 }
745
746 int
747 rump_splfoo()
748 {
749
750 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
751 rumpuser_rw_enter(&rumpspl, 0);
752 rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
753 }
754
755 return 0;
756 }
757
/*
 * Enter (simulated) interrupt context: mark the ipl and take the
 * rumpspl lock on the interrupt side.
 */
static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

/* Leave interrupt context; pairs with rump_intr_enter(). */
static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}
773
/*
 * Undo rump_splfoo().  The argument exists only for spl-interface
 * symmetry and is ignored.
 */
void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
783
784 void
785 rump_biodone(void *arg, size_t count, int error)
786 {
787 struct buf *bp = arg;
788
789 bp->b_resid = bp->b_bcount - count;
790 KASSERT(bp->b_resid >= 0);
791 bp->b_error = error;
792
793 rump_intr_enter();
794 biodone(bp);
795 rump_intr_exit();
796 }
797
798 kauth_cred_t
799 rump_cred_create(uid_t uid, gid_t gid, size_t ngroups, gid_t *groups)
800 {
801 kauth_cred_t cred;
802 int rv;
803
804 cred = kauth_cred_alloc();
805 kauth_cred_setuid(cred, uid);
806 kauth_cred_seteuid(cred, uid);
807 kauth_cred_setsvuid(cred, uid);
808 kauth_cred_setgid(cred, gid);
809 kauth_cred_setgid(cred, gid);
810 kauth_cred_setegid(cred, gid);
811 kauth_cred_setsvgid(cred, gid);
812 rv = kauth_cred_setgroups(cred, groups, ngroups, 0, UIO_SYSSPACE);
813 /* oh this is silly. and by "this" I mean kauth_cred_setgroups() */
814 assert(rv == 0);
815
816 return cred;
817 }
818
/* Drop a credential reference obtained from this module. */
void
rump_cred_destroy(kauth_cred_t cred)
{

	kauth_cred_free(cred);
}
825
826 kauth_cred_t
827 rump_cred_suserget()
828 {
829
830 kauth_cred_hold(rump_susercred);
831 return rump_susercred;
832 }
833
834 /* XXX: if they overflow, we're screwed */
835 lwpid_t
836 rump_nextlid()
837 {
838 static unsigned lwpid = 2;
839
840 do {
841 lwpid = atomic_inc_uint_nv(&lwpid);
842 } while (lwpid == 0);
843
844 return (lwpid_t)lwpid;
845 }
846
/*
 * Stub for syspuffs_glueinit, used when the syspuffs component is
 * not linked in; always fails with ENODEV.
 */
int _syspuffs_stub(int, int *);
int
_syspuffs_stub(int fd, int *newfd)
{

	return ENODEV;
}

__weak_alias(syspuffs_glueinit,_syspuffs_stub);