rump.c revision 1.71 1 /* $NetBSD: rump.c,v 1.71 2008/10/15 13:04:26 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/atomic.h>
32 #include <sys/callout.h>
33 #include <sys/cpu.h>
34 #include <sys/filedesc.h>
35 #include <sys/kauth.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mount.h>
39 #include <sys/namei.h>
40 #include <sys/once.h>
41 #include <sys/percpu.h>
42 #include <sys/queue.h>
43 #include <sys/resourcevar.h>
44 #include <sys/select.h>
45 #include <sys/uidinfo.h>
46 #include <sys/vnode.h>
47 #include <sys/vfs_syscalls.h>
48 #include <sys/wapbl.h>
49 #include <sys/sysctl.h>
50
51 #include <miscfs/specfs/specdev.h>
52
53 #include <rump/rumpuser.h>
54
55 #include "rump_private.h"
56 #include "rump_net_private.h"
57
/*
 * Static stand-ins for kernel state normally set up at boot:
 * proc0/lwp0 context, resource limits, a single cpu and the initial
 * file descriptor table for the rump "kernel".
 */
struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;
char machine[] = "rump";
static kauth_cred_t rump_susercred;	/* shared superuser credential */

kmutex_t rump_giantlock;	/* recursive stand-in for the kernel biglock */

sigset_t sigcantmask;

/* thread use may be disabled at compile time or via RUMP_THREADS env */
#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

/* registry entry for a host file doubling as a block device (realpath key) */
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
84
/*
 * Workqueue handler for async I/O completion.  The work item is
 * embedded in the buf, so recover the buf and invoke its b_iodone hook.
 */
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
93
/* nonzero once _rump_init() has completed; guards against re-init */
static int rump_inited;
/* minimal emulation record hung off proc0; fields left zeroed */
static struct emul emul_rump;

/*
 * Weak-alias fallbacks: when the optional rump net/vfs components are
 * not linked in, these empty stubs satisfy the init symbols.
 * _rump_init() compares the function pointer against the stub to
 * detect whether the real component is present.
 */
void __rump_net_unavailable(void);
void __rump_net_unavailable() {}
__weak_alias(rump_net_init,__rump_net_unavailable);

void __rump_vfs_unavailable(void);
void __rump_vfs_unavailable() {}
__weak_alias(rump_vfs_init,__rump_vfs_unavailable);
104
/*
 * Bootstrap the rump kernel: bring up enough NetBSD kernel
 * infrastructure (vm, credentials, proc0/lwp0, vfs, optionally net)
 * to run kernel file system code in userspace.  Idempotent; returns
 * 0 on success or EPROGMISMATCH on library/application ABI skew.
 */
int
_rump_init(int rump_version)
{
	extern char hostname[];
	extern size_t hostnamelen;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return 0;
	rump_inited = 1;

	/* application and librump must agree on the ABI version */
	if (rump_version != RUMP_VERSION) {
		printf("rump version mismatch, %d vs. %d\n",
		    rump_version, RUMP_VERSION);
		return EPROGMISMATCH;
	}

	/* tunables from the host environment */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}
	if (rumpuser_getenv("RUMP_THREADS", buf, sizeof(buf), &error) == 0) {
		rump_threads = *buf != '0';
	}

	/* the giant lock is recursive, like the biglock it stands in for */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	kauth_init();
	rump_susercred = rump_cred_create(0, 0, 0, NULL);

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);

	/* hand-craft proc0/lwp0 from the static structures above */
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = 1;
	LIST_INSERT_HEAD(&allproc, p, p_list);

	/* no resource limits inside the rump kernel */
	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_SBSIZE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();
	callout_startup();
	callout_init_cpu(&rump_cpu);

	/* subsystem init -- call order matters here */
	once_init();
	uid_init();
	percpu_init();
	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();
	softint_init(&rump_cpu);
	rumpvfs_init();

	/* networking only if the net component was linked in */
	if (rump_net_init != __rump_net_unavailable)
		rump_net_init();

	/* aieeeedondest */
	if (rump_threads) {
		if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
		    rump_aiodone_worker, NULL, 0, 0, 0))
			panic("aiodoned");
	}

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	lwp0.l_fd = proc0.p_fd = fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;

	return 0;
}
206
207 struct mount *
208 rump_mnt_init(struct vfsops *vfsops, int mntflags)
209 {
210 struct mount *mp;
211
212 mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);
213
214 mp->mnt_op = vfsops;
215 mp->mnt_flag = mntflags;
216 TAILQ_INIT(&mp->mnt_vnodelist);
217 rw_init(&mp->mnt_unmounting);
218 mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
219 mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
220 mp->mnt_refcnt = 1;
221
222 mount_initspecific(mp);
223
224 return mp;
225 }
226
227 int
228 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
229 {
230 struct vnode *rvp;
231 int rv;
232
233 rv = VFS_MOUNT(mp, path, data, dlen);
234 if (rv)
235 return rv;
236
237 (void) VFS_STATVFS(mp, &mp->mnt_stat);
238 rv = VFS_START(mp, 0);
239 if (rv)
240 VFS_UNMOUNT(mp, MNT_FORCE);
241
242 /*
243 * XXX: set a root for lwp0. This is strictly not correct,
244 * but makes things works for single fs case without having
245 * to manually call rump_rcvp_set().
246 */
247 VFS_ROOT(mp, &rvp);
248 rump_rcvp_set(rvp, rvp);
249 vput(rvp);
250
251 return rv;
252 }
253
254 void
255 rump_mnt_destroy(struct mount *mp)
256 {
257
258 mount_finispecific(mp);
259 kmem_free(mp, sizeof(*mp));
260 }
261
262 struct componentname *
263 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
264 kauth_cred_t creds, struct lwp *l)
265 {
266 struct componentname *cnp;
267 const char *cp = NULL;
268
269 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
270
271 cnp->cn_nameiop = nameiop;
272 cnp->cn_flags = flags | HASBUF;
273
274 cnp->cn_pnbuf = PNBUF_GET();
275 strcpy(cnp->cn_pnbuf, name);
276 cnp->cn_nameptr = cnp->cn_pnbuf;
277 cnp->cn_namelen = namelen;
278 cnp->cn_hash = namei_hash(name, &cp);
279
280 cnp->cn_cred = creds;
281
282 return cnp;
283 }
284
/*
 * Release a componentname obtained from rump_makecn()/rump_namei().
 * flags: RUMPCN_FREECRED also drops the credential reference;
 * RUMPCN_HASNTBUF indicates the pathbuf is no longer ours to free;
 * RUMPCN_ISLOOKUP marks lookup-class use.
 */
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			/*
			 * NOTE(review): this presumably mirrors namei()'s
			 * pathbuf ownership rules (SAVENAME/SAVESTART) --
			 * confirm against vfs_lookup before changing.
			 */
			if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}
302
/* hey baby, what's your namei? */
/*
 * Perform a namei() lookup.  Any of dvpp/vpp/cnpp may be NULL when the
 * caller is not interested in that result; unwanted vnode references
 * are released here.  LOCKPARENT in flags must match whether dvpp is
 * requested.  Returns 0 or the namei() error.
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		/* parent vnode is only returned/locked with LOCKPARENT */
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		/* caller doesn't want the leaf: drop our reference */
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		/* hand a copy of the cn (and pathbuf ownership) to caller */
		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody left to free the pathbuf: flag misuse by caller */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
346
347 static struct fakeblk *
348 _rump_fakeblk_find(const char *path)
349 {
350 char buf[MAXPATHLEN];
351 struct fakeblk *fblk;
352 int error;
353
354 if (rumpuser_realpath(path, buf, &error) == NULL)
355 return NULL;
356
357 LIST_FOREACH(fblk, &fakeblks, entries)
358 if (strcmp(fblk->path, buf) == 0)
359 return fblk;
360
361 return NULL;
362 }
363
364 int
365 rump_fakeblk_register(const char *path)
366 {
367 char buf[MAXPATHLEN];
368 struct fakeblk *fblk;
369 int error;
370
371 if (_rump_fakeblk_find(path))
372 return EEXIST;
373
374 if (rumpuser_realpath(path, buf, &error) == NULL)
375 return error;
376
377 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
378 if (fblk == NULL)
379 return ENOMEM;
380
381 strlcpy(fblk->path, buf, MAXPATHLEN);
382 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
383
384 return 0;
385 }
386
387 int
388 rump_fakeblk_find(const char *path)
389 {
390
391 return _rump_fakeblk_find(path) != NULL;
392 }
393
394 void
395 rump_fakeblk_deregister(const char *path)
396 {
397 struct fakeblk *fblk;
398
399 fblk = _rump_fakeblk_find(path);
400 if (fblk == NULL)
401 return;
402
403 LIST_REMOVE(fblk, entries);
404 kmem_free(fblk, sizeof(*fblk));
405 }
406
407 void
408 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
409 {
410
411 *vtype = vp->v_type;
412 *vsize = vp->v_size;
413 if (vp->v_specnode)
414 *vdev = vp->v_rdev;
415 else
416 *vdev = 0;
417 }
418
419 struct vfsops *
420 rump_vfslist_iterate(struct vfsops *ops)
421 {
422
423 if (ops == NULL)
424 return LIST_FIRST(&vfs_list);
425 else
426 return LIST_NEXT(ops, vfs_list);
427 }
428
/* Look up vfsops by file system name; thin wrapper for rump clients. */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{
	struct vfsops *ops;

	ops = vfs_getopsbyname(name);
	return ops;
}
435
436 struct vattr*
437 rump_vattr_init()
438 {
439 struct vattr *vap;
440
441 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
442 vattr_null(vap);
443
444 return vap;
445 }
446
447 void
448 rump_vattr_settype(struct vattr *vap, enum vtype vt)
449 {
450
451 vap->va_type = vt;
452 }
453
454 void
455 rump_vattr_setmode(struct vattr *vap, mode_t mode)
456 {
457
458 vap->va_mode = mode;
459 }
460
461 void
462 rump_vattr_setrdev(struct vattr *vap, dev_t dev)
463 {
464
465 vap->va_rdev = dev;
466 }
467
468 void
469 rump_vattr_free(struct vattr *vap)
470 {
471
472 kmem_free(vap, sizeof(*vap));
473 }
474
/*
 * Manually bump a vnode's use count under the interlock.  This
 * bypasses vref(); rump callers use it to juggle references the host
 * kernel would otherwise manage (see rump_vp_recycle_nokidding).
 */
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
483
484 int
485 rump_vp_getref(struct vnode *vp)
486 {
487
488 return vp->v_usecount;
489 }
490
/*
 * Manually drop a vnode's use count under the interlock; counterpart
 * of rump_vp_incref().  Does not trigger inactivation like vrele().
 */
void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
499
/*
 * Really really recycle with a cherry on top. We should be
 * extra-sure we can do this. For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/* force the use count to exactly one so vclean/vrelel finish it */
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out. This is very wrong, but fixing it properly would
	 * take too much effort for now
	 */
	if (vp->v_tag == VT_NFS && vp->v_vflag & VV_ROOT) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	/* vclean/vrelel consume the interlock taken above */
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
524
/* Drop a vnode reference; exported vrele() for rump consumers. */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
531
532 struct uio *
533 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
534 {
535 struct uio *uio;
536 enum uio_rw uiorw;
537
538 switch (rw) {
539 case RUMPUIO_READ:
540 uiorw = UIO_READ;
541 break;
542 case RUMPUIO_WRITE:
543 uiorw = UIO_WRITE;
544 break;
545 default:
546 panic("%s: invalid rw %d", __func__, rw);
547 }
548
549 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
550 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
551
552 uio->uio_iov->iov_base = buf;
553 uio->uio_iov->iov_len = bufsize;
554
555 uio->uio_iovcnt = 1;
556 uio->uio_offset = offset;
557 uio->uio_resid = bufsize;
558 uio->uio_rw = uiorw;
559 uio->uio_vmspace = UIO_VMSPACE_SYS;
560
561 return uio;
562 }
563
564 size_t
565 rump_uio_getresid(struct uio *uio)
566 {
567
568 return uio->uio_resid;
569 }
570
571 off_t
572 rump_uio_getoff(struct uio *uio)
573 {
574
575 return uio->uio_offset;
576 }
577
578 size_t
579 rump_uio_free(struct uio *uio)
580 {
581 size_t resid;
582
583 resid = uio->uio_resid;
584 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
585 kmem_free(uio, sizeof(*uio));
586
587 return resid;
588 }
589
590 void
591 rump_vp_lock_exclusive(struct vnode *vp)
592 {
593
594 /* we can skip vn_lock() */
595 VOP_LOCK(vp, LK_EXCLUSIVE);
596 }
597
598 void
599 rump_vp_lock_shared(struct vnode *vp)
600 {
601
602 VOP_LOCK(vp, LK_SHARED);
603 }
604
/* Release the vnode lock. */
void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}
611
/* Query the vnode lock state via VOP_ISLOCKED. */
int
rump_vp_islocked(struct vnode *vp)
{
	int locked;

	locked = VOP_ISLOCKED(vp);
	return locked;
}
618
619 void
620 rump_vp_interlock(struct vnode *vp)
621 {
622
623 mutex_enter(&vp->v_interlock);
624 }
625
/* Unmount mp with the given MNT_* flags; exported VFS_UNMOUNT wrapper. */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{
	int error;

	error = VFS_UNMOUNT(mp, mntflags);
	return error;
}
632
/*
 * Fetch the root vnode of mp.  VFS_ROOT returns it locked; drop the
 * lock when the caller did not ask for a locked vnode.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error;

	error = VFS_ROOT(mp, vpp);
	if (error != 0)
		return error;

	if (lock == 0)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}
647
/* Fill sbp with statistics for mp; exported VFS_STATVFS wrapper. */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	int error;

	error = VFS_STATVFS(mp, sbp);
	return error;
}
654
655 int
656 rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
657 {
658
659 return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
660 }
661
/* Translate a file handle to a vnode; exported VFS_FHTOVP wrapper. */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{
	int error;

	error = VFS_FHTOVP(mp, fid, vpp);
	return error;
}
668
669 int
670 rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
671 {
672
673 return VFS_VPTOFH(vp, fid, fidsize);
674 }
675
/*
 * Wait for all dirty buffers to drain; complains (but does not fail)
 * if some remain unsynced.  mp is unused.
 */
/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int unsynced;

	unsynced = buf_syncwait();
	if (unsynced != 0)
		printf("syncwait: unsynced buffers: %d\n", unsynced);
}
686
687 int
688 rump_vfs_load(struct modinfo **mi)
689 {
690
691 if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
692 return EPROGMISMATCH;
693
694 return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
695 }
696
697 void
698 rump_bioops_sync()
699 {
700
701 if (bioopsp)
702 bioopsp->io_sync(NULL);
703 }
704
/*
 * Create an lwp for the given pid/lid and, when set is nonzero,
 * install it as the current lwp for the calling host thread.
 * pid 0 attaches the lwp to proc0; any other pid gets a fresh
 * minimal proc sharing the global stats/limits/vmspace.
 * Release with rump_clear_curlwp().
 */
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	if (pid != 0) {
		p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
		p->p_cwdi = cwdinit();

		p->p_stats = &rump_stats;
		p->p_limit = &rump_limits;
		p->p_pid = pid;
		p->p_vmspace = &rump_vmspace;
		p->p_fd = fd_init(NULL);
	} else {
		p = &proc0;
	}

	l->l_cred = rump_cred_suserget();
	l->l_proc = p;
	l->l_lid = lid;
	l->l_fd = p->p_fd;
	l->l_mutex = RUMP_LMUTEX_MAGIC;
	l->l_cpu = &rump_cpu;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}
737
738 void
739 rump_clear_curlwp()
740 {
741 struct lwp *l;
742
743 l = rumpuser_get_curlwp();
744 if (l->l_proc->p_pid != 0) {
745 fd_free();
746 cwdfree(l->l_proc->p_cwdi);
747 rump_cred_destroy(l->l_cred);
748 kmem_free(l->l_proc, sizeof(*l->l_proc));
749 }
750 kmem_free(l, sizeof(*l));
751 rumpuser_set_curlwp(NULL);
752 }
753
754 struct lwp *
755 rump_get_curlwp()
756 {
757 struct lwp *l;
758
759 l = rumpuser_get_curlwp();
760 if (l == NULL)
761 l = &lwp0;
762
763 return l;
764 }
765
766 int
767 rump_splfoo()
768 {
769
770 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
771 rumpuser_rw_enter(&rumpspl, 0);
772 rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
773 }
774
775 return 0;
776 }
777
778 static void
779 rump_intr_enter(void)
780 {
781
782 rumpuser_set_ipl(RUMPUSER_IPL_INTR);
783 rumpuser_rw_enter(&rumpspl, 1);
784 }
785
786 static void
787 rump_intr_exit(void)
788 {
789
790 rumpuser_rw_exit(&rumpspl);
791 rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
792 }
793
794 void
795 rump_splx(int dummy)
796 {
797
798 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
799 rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
800 rumpuser_rw_exit(&rumpspl);
801 }
802 }
803
/*
 * I/O completion callback invoked by the rumpuser backend for a buf.
 * count is the number of bytes actually transferred; the shortfall is
 * recorded in b_resid.  biodone() runs bracketed by the fake
 * interrupt-context enter/exit pair.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
817
818 kauth_cred_t
819 rump_cred_create(uid_t uid, gid_t gid, size_t ngroups, gid_t *groups)
820 {
821 kauth_cred_t cred;
822 int rv;
823
824 cred = kauth_cred_alloc();
825 kauth_cred_setuid(cred, uid);
826 kauth_cred_seteuid(cred, uid);
827 kauth_cred_setsvuid(cred, uid);
828 kauth_cred_setgid(cred, gid);
829 kauth_cred_setgid(cred, gid);
830 kauth_cred_setegid(cred, gid);
831 kauth_cred_setsvgid(cred, gid);
832 rv = kauth_cred_setgroups(cred, groups, ngroups, 0, UIO_SYSSPACE);
833 /* oh this is silly. and by "this" I mean kauth_cred_setgroups() */
834 assert(rv == 0);
835
836 return cred;
837 }
838
839 void
840 rump_cred_destroy(kauth_cred_t cred)
841 {
842
843 kauth_cred_free(cred);
844 }
845
846 kauth_cred_t
847 rump_cred_suserget()
848 {
849
850 kauth_cred_hold(rump_susercred);
851 return rump_susercred;
852 }
853
854 /* XXX: if they overflow, we're screwed */
855 lwpid_t
856 rump_nextlid()
857 {
858 static unsigned lwpid = 2;
859
860 do {
861 lwpid = atomic_inc_uint_nv(&lwpid);
862 } while (lwpid == 0);
863
864 return (lwpid_t)lwpid;
865 }
866
/*
 * Weak fallback for syspuffs_glueinit: returns ENODEV when the
 * syspuffs component is not linked into the application.
 */
int _syspuffs_stub(int, int *);
int
_syspuffs_stub(int fd, int *newfd)
{

	return ENODEV;
}
__weak_alias(syspuffs_glueinit,_syspuffs_stub);
875