rump.c revision 1.66 1 /* $NetBSD: rump.c,v 1.66 2008/10/12 18:52:56 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/atomic.h>
32 #include <sys/callout.h>
33 #include <sys/cpu.h>
34 #include <sys/filedesc.h>
35 #include <sys/kauth.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mount.h>
39 #include <sys/namei.h>
40 #include <sys/percpu.h>
41 #include <sys/queue.h>
42 #include <sys/resourcevar.h>
43 #include <sys/select.h>
44 #include <sys/vnode.h>
45 #include <sys/vfs_syscalls.h>
46 #include <sys/wapbl.h>
47 #include <sys/sysctl.h>
48
49 #include <miscfs/specfs/specdev.h>
50
51 #include <rump/rumpuser.h>
52
53 #include "rump_private.h"
54
/*
 * Faked-up kernel globals: rump runs a single proc0/lwp0 environment
 * on one virtual CPU with shared static stats and limits.
 */
struct proc proc0;
struct cwdinfo rump_cwdi;		/* cwd info for proc0 */
struct pstats rump_stats;		/* shared by all rump procs */
struct plimit rump_limits;		/* shared resource limits */
struct cpu_info rump_cpu;		/* the only "CPU" in rump */
struct filedesc rump_filedesc0;		/* fd table for proc0 */
struct proclist allproc;
char machine[] = "rump";
static kauth_cred_t rump_susercred;	/* cached superuser credential */

/* recursive giant lock, initialized in _rump_init() */
kmutex_t rump_giantlock;

sigset_t sigcantmask;

/* threads can be disabled at build time or via RUMP_THREADS env var */
#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

/* registry entry for a host file posing as a block device */
struct fakeblk {
	char path[MAXPATHLEN];		/* canonical (realpath'd) host path */
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
81
/*
 * Worker for the aiodone workqueue: the work item is embedded in
 * struct buf, so convert back and invoke the buffer's iodone hook.
 */
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
90
static int rump_inited;		/* guards against double init (XXX: not atomic) */
static struct emul emul_rump;	/* dummy emulation package for proc0 */
93
/*
 * Bootstrap the rump kernel: verify ABI version, read tunables from
 * the host environment, and bring up the kernel subsystems in
 * dependency order.  Returns 0 on success or an errno value.
 *
 * NOTE(review): the initialization order below is significant;
 * do not reorder the subsystem init calls.
 */
int
_rump_init(int rump_version)
{
	extern char hostname[];
	extern size_t hostnamelen;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX: plain flag, concurrent first callers could race here */
	if (rump_inited)
		return 0;
	rump_inited = 1;

	/* library and kernel must agree on the interface version */
	if (rump_version != RUMP_VERSION) {
		printf("rump version mismatch, %d vs. %d\n",
		    rump_version, RUMP_VERSION);
		return EPROGMISMATCH;
	}

	/* tunables from the host environment */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}
	if (rumpuser_getenv("RUMP_THREADS", buf, sizeof(buf), &error) == 0) {
		rump_threads = *buf != '0';
	}

	/* recursive mutex: the biglock may be taken recursively */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	kauth_init();
	rump_susercred = rump_cred_create(0, 0, 0, NULL);

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);

	/* wire proc0/lwp0 to the faked-up static structures above */
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = 1;
	LIST_INSERT_HEAD(&allproc, p, p_list);

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	/* sync immediately; allow mounts by non-root users */
	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();
	callout_startup();
	callout_init_cpu(&rump_cpu);

	percpu_init();
	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();
	softint_init(&rump_cpu);

	rumpvfs_init();

	/* aieeeedondest */
	if (rump_threads) {
		if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
		    rump_aiodone_worker, NULL, 0, 0, 0))
			panic("aiodoned");
	}

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	/* proc0's fd table and cwd become usable only at this point */
	lwp0.l_fd = proc0.p_fd = fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;

	return 0;
}
190
/*
 * Allocate and minimally initialize a struct mount for the given
 * vfs ops, mimicking what the in-kernel mount path sets up.
 * Released with rump_mnt_destroy().
 */
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;		/* caller's reference */

	mount_initspecific(mp);

	return mp;
}
210
211 int
212 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
213 {
214 struct vnode *rvp;
215 int rv;
216
217 rv = VFS_MOUNT(mp, path, data, dlen);
218 if (rv)
219 return rv;
220
221 (void) VFS_STATVFS(mp, &mp->mnt_stat);
222 rv = VFS_START(mp, 0);
223 if (rv)
224 VFS_UNMOUNT(mp, MNT_FORCE);
225
226 /*
227 * XXX: set a root for lwp0. This is strictly not correct,
228 * but makes things works for single fs case without having
229 * to manually call rump_rcvp_set().
230 */
231 VFS_ROOT(mp, &rvp);
232 rump_rcvp_set(rvp, rvp);
233 vput(rvp);
234
235 return rv;
236 }
237
/*
 * Release a mount structure allocated with rump_mnt_init().
 * The fs must already be unmounted.
 */
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}
245
246 struct componentname *
247 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
248 kauth_cred_t creds, struct lwp *l)
249 {
250 struct componentname *cnp;
251 const char *cp = NULL;
252
253 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
254
255 cnp->cn_nameiop = nameiop;
256 cnp->cn_flags = flags | HASBUF;
257
258 cnp->cn_pnbuf = PNBUF_GET();
259 strcpy(cnp->cn_pnbuf, name);
260 cnp->cn_nameptr = cnp->cn_pnbuf;
261 cnp->cn_namelen = namelen;
262 cnp->cn_hash = namei_hash(name, &cp);
263
264 cnp->cn_cred = creds;
265
266 return cnp;
267 }
268
269 void
270 rump_freecn(struct componentname *cnp, int flags)
271 {
272
273 if (flags & RUMPCN_FREECRED)
274 rump_cred_destroy(cnp->cn_cred);
275
276 if ((flags & RUMPCN_HASNTBUF) == 0) {
277 if (cnp->cn_flags & SAVENAME) {
278 if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
279 PNBUF_PUT(cnp->cn_pnbuf);
280 } else {
281 PNBUF_PUT(cnp->cn_pnbuf);
282 }
283 }
284 kmem_free(cnp, sizeof(*cnp));
285 }
286
/* hey baby, what's your namei? */
/*
 * Perform a namei lookup of namep with the given op/flags.
 * Optionally returns the locked parent (dvpp, requires LOCKPARENT),
 * the result vnode (vpp), and a copy of the componentname (cnpp).
 * If the caller does not want the result vnode, the reference
 * acquired by namei() is dropped here.
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		/* parent vnode is valid only if LOCKPARENT was given */
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			/* caller doesn't want it: undo namei's hold */
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody to own the pathbuf (SAVENAME et al.) => bug */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
330
/*
 * Look up a registered fake block device by host path.  The path is
 * canonicalized with realpath so different spellings of the same
 * file match.  Returns NULL if not registered or realpath fails.
 */
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}
347
348 int
349 rump_fakeblk_register(const char *path)
350 {
351 char buf[MAXPATHLEN];
352 struct fakeblk *fblk;
353 int error;
354
355 if (_rump_fakeblk_find(path))
356 return EEXIST;
357
358 if (rumpuser_realpath(path, buf, &error) == NULL)
359 return error;
360
361 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
362 if (fblk == NULL)
363 return ENOMEM;
364
365 strlcpy(fblk->path, buf, MAXPATHLEN);
366 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
367
368 return 0;
369 }
370
/* Return nonzero iff path is registered as a fake block device. */
int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}
377
378 void
379 rump_fakeblk_deregister(const char *path)
380 {
381 struct fakeblk *fblk;
382
383 fblk = _rump_fakeblk_find(path);
384 if (fblk == NULL)
385 return;
386
387 LIST_REMOVE(fblk, entries);
388 kmem_free(fblk, sizeof(*fblk));
389 }
390
391 void
392 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
393 {
394
395 *vtype = vp->v_type;
396 *vsize = vp->v_size;
397 if (vp->v_specnode)
398 *vdev = vp->v_rdev;
399 else
400 *vdev = 0;
401 }
402
403 struct vfsops *
404 rump_vfslist_iterate(struct vfsops *ops)
405 {
406
407 if (ops == NULL)
408 return LIST_FIRST(&vfs_list);
409 else
410 return LIST_NEXT(ops, vfs_list);
411 }
412
/* Thin wrapper: look up vfs ops by file system name. */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}
419
420 struct vattr*
421 rump_vattr_init()
422 {
423 struct vattr *vap;
424
425 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
426 vattr_null(vap);
427
428 return vap;
429 }
430
/* Set the vnode type attribute (VREG, VDIR, ...). */
void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}
437
/* Set the file mode attribute. */
void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}
444
/* Set the device number attribute (for device special files). */
void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}
451
/* Free a vattr allocated with rump_vattr_init(). */
void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}
458
/*
 * Bump a vnode's use count directly under the interlock.
 * NOTE(review): bypasses vref(); assumes the caller already
 * guarantees the vnode cannot be reclaimed concurrently.
 */
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
467
/*
 * Return the vnode's current use count.  Read without the
 * interlock, so the value is only a snapshot.
 */
int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}
474
/*
 * Drop a vnode use count directly under the interlock.
 * NOTE(review): unlike vrele(), no last-reference processing is
 * performed; the count must not reach a state vrele() would act on.
 */
void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
483
/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/*
	 * Force the use count to exactly 1 so the vrelel() below drops
	 * the last reference regardless of outstanding holds --
	 * presumably safe only under the conditions described above.
	 */
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out.  This is very wrong, but fixing it properly would
	 * take too much effort for now
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
508
/* Thin wrapper: release a vnode reference via vrele(). */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
515
516 struct uio *
517 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
518 {
519 struct uio *uio;
520 enum uio_rw uiorw;
521
522 switch (rw) {
523 case RUMPUIO_READ:
524 uiorw = UIO_READ;
525 break;
526 case RUMPUIO_WRITE:
527 uiorw = UIO_WRITE;
528 break;
529 default:
530 panic("%s: invalid rw %d", __func__, rw);
531 }
532
533 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
534 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
535
536 uio->uio_iov->iov_base = buf;
537 uio->uio_iov->iov_len = bufsize;
538
539 uio->uio_iovcnt = 1;
540 uio->uio_offset = offset;
541 uio->uio_resid = bufsize;
542 uio->uio_rw = uiorw;
543 uio->uio_vmspace = UIO_VMSPACE_SYS;
544
545 return uio;
546 }
547
/* Return the number of bytes not yet transferred. */
size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}
554
/* Return the current transfer offset. */
off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}
561
/*
 * Free a uio created with rump_uio_setup() and return the residual
 * byte count at the time of release.
 */
size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}
573
/* Take the vnode lock exclusively. */
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}
581
/* Take the vnode lock shared. */
void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}
588
/* Release the vnode lock. */
void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}
595
/* Return the vnode's lock status (see VOP_ISLOCKED()). */
int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}
602
/* Acquire the vnode interlock (released by the eventual consumer). */
void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}
609
/* Thin wrapper around VFS_UNMOUNT(). */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}
616
/*
 * Fetch the root vnode of a mount.  VFS_ROOT() returns it locked;
 * if the caller did not ask for a locked vnode, unlock it here.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv != 0)
		return rv;

	if (lock == 0)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}
631
/* Thin wrapper around VFS_STATVFS(). */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}
638
/* Sync the file system; wait selects MNT_WAIT vs. MNT_NOWAIT. */
int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}
645
/* Thin wrapper: file handle to vnode. */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}
652
/* Thin wrapper: vnode to file handle. */
int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}
659
/*ARGSUSED*/
/*
 * Wait for all buffers to be flushed; complain about any that
 * remain unsynced.  The mp argument is unused.
 */
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced;

	unsynced = buf_syncwait();
	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
670
/*
 * "Load" a vfs module: verify it was built against a compatible
 * kernel version, then run its MODULE_CMD_INIT command.
 */
int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}
680
681 void
682 rump_bioops_sync()
683 {
684
685 if (bioopsp)
686 bioopsp->io_sync(NULL);
687 }
688
/*
 * Create an lwp -- and, for pid != 0, a proc to contain it -- for
 * use as an implicit thread context.  pid 0 attaches the new lwp to
 * proc0.  If set is nonzero, the lwp is installed as the current
 * lwp.  Torn down with rump_clear_curlwp().
 */
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	if (pid != 0) {
		/* fresh proc with its own cwd and fd table */
		p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
		p->p_cwdi = cwdinit();

		p->p_stats = &rump_stats;
		p->p_limit = &rump_limits;
		p->p_pid = pid;
		p->p_vmspace = &rump_vmspace;
		p->p_fd = fd_init(NULL);
	} else {
		p = &proc0;
	}

	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = lid;
	l->l_fd = p->p_fd;
	l->l_mutex = RUMP_LMUTEX_MAGIC;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}
720
721 void
722 rump_clear_curlwp()
723 {
724 struct lwp *l;
725
726 l = rumpuser_get_curlwp();
727 if (l->l_proc->p_pid != 0) {
728 fd_free();
729 cwdfree(l->l_proc->p_cwdi);
730 rump_cred_destroy(l->l_cred);
731 kmem_free(l->l_proc, sizeof(*l->l_proc));
732 }
733 kmem_free(l, sizeof(*l));
734 rumpuser_set_curlwp(NULL);
735 }
736
737 struct lwp *
738 rump_get_curlwp()
739 {
740 struct lwp *l;
741
742 l = rumpuser_get_curlwp();
743 if (l == NULL)
744 l = &lwp0;
745
746 return l;
747 }
748
749 int
750 rump_splfoo()
751 {
752
753 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
754 rumpuser_rw_enter(&rumpspl, 0);
755 rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
756 }
757
758 return 0;
759 }
760
/*
 * Enter simulated interrupt context: mark interrupt level and take
 * the spl rwlock as writer, excluding rump_splfoo() holders.
 */
static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}
768
/* Leave simulated interrupt context (reverse of rump_intr_enter()). */
static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}
776
/*
 * Restore the "spl" taken by rump_splfoo().  The argument is unused
 * since there is only a single blocked level.
 */
void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
786
/*
 * Host I/O completion callback: record residual and error on the
 * buffer, then run biodone() in simulated interrupt context.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
800
801 kauth_cred_t
802 rump_cred_create(uid_t uid, gid_t gid, size_t ngroups, gid_t *groups)
803 {
804 kauth_cred_t cred;
805 int rv;
806
807 cred = kauth_cred_alloc();
808 kauth_cred_setuid(cred, uid);
809 kauth_cred_seteuid(cred, uid);
810 kauth_cred_setsvuid(cred, uid);
811 kauth_cred_setgid(cred, gid);
812 kauth_cred_setgid(cred, gid);
813 kauth_cred_setegid(cred, gid);
814 kauth_cred_setsvgid(cred, gid);
815 rv = kauth_cred_setgroups(cred, groups, ngroups, 0, UIO_SYSSPACE);
816 /* oh this is silly. and by "this" I mean kauth_cred_setgroups() */
817 assert(rv == 0);
818
819 return cred;
820 }
821
/* Drop a credential reference obtained from this module. */
void
rump_cred_destroy(kauth_cred_t cred)
{

	kauth_cred_free(cred);
}
828
829 kauth_cred_t
830 rump_cred_suserget()
831 {
832
833 kauth_cred_hold(rump_susercred);
834 return rump_susercred;
835 }
836
837 /* XXX: if they overflow, we're screwed */
838 lwpid_t
839 rump_nextlid()
840 {
841 static unsigned lwpid = 2;
842
843 do {
844 lwpid = atomic_inc_uint_nv(&lwpid);
845 } while (lwpid == 0);
846
847 return (lwpid_t)lwpid;
848 }
849
/*
 * Default no-op stub for syspuffs_glueinit, overridden through the
 * weak alias below when the real syspuffs code is linked in.
 * Always returns ENODEV.
 */
int _syspuffs_stub(int, int *);
int
_syspuffs_stub(int fd, int *newfd)
{

	return ENODEV;
}

__weak_alias(syspuffs_glueinit,_syspuffs_stub);
859