/*	$NetBSD: rump.c,v 1.58 2008/09/30 16:51:26 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>
#include <sys/wapbl.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;
char machine[] = "rump";

kmutex_t rump_giantlock;

sigset_t sigcantmask;

#ifdef RUMP_WITHOUT_THREADS
int rump_threads = 0;
#else
int rump_threads = 1;
#endif

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

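/*
 * Worker for the async i/o completion workqueue: hand a finished
 * buf back to its b_iodone callback from thread context.
 */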
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}

static int rump_inited;
static struct emul emul_rump;

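/*
 * Bootstrap the rump kernel: set up proc0/lwp0, bring up the
 * subsystems we need (VM, VFS, buffer cache, modules, ...) and apply
 * the RUMP_NVNODES and RUMP_THREADS tunables from the host environment.
 */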
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX: guard against double init */
	if (rump_inited)
		return;
	rump_inited = 1;

	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}
	if (rumpuser_getenv("RUMP_THREADS", buf, sizeof(buf), &error) == 0) {
		rump_threads = *buf != '0';
	}

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();

	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();

	rumpvfs_init();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

	/* create the aiodone daemon workqueue */
	if (rump_threads) {
		if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
		    rump_aiodone_worker, NULL, 0, 0, 0))
			panic("aiodoned");
	}

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	lwp0.l_fd = proc0.p_fd = fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}

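/*
 * Allocate and minimally initialize a struct mount: locks, vnode
 * list and one reference.  The caller attaches a file system to it
 * with rump_mnt_mount() and disposes of it with rump_mnt_destroy().
 */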
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}

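/*
 * Mount and start a file system on mp.  As a convenience for the
 * single file system case, the fs root is also installed as the root
 * and current directory of lwp0 (see the XXX below).
 */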
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	struct vnode *rvp;
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	/*
	 * XXX: set a root for lwp0.  This is strictly speaking not
	 * correct, but it makes things work in the single file system
	 * case without having to manually call rump_rcvp_set().
	 */
	VFS_ROOT(mp, &rvp);
	rump_rcvp_set(rvp, rvp);
	vput(rvp);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

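/*
 * Build a componentname for use with VOPs.  The pathname buffer is
 * allocated here (hence HASBUF) and released by rump_freecn().
 */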
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
	kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags | HASBUF;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

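/*
 * Release a componentname obtained from rump_makecn() or rump_namei(),
 * freeing the pathname buffer and, if RUMPCN_FREECRED is given, the
 * credentials as well.
 */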
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||
			    cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}

/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}

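/*
 * Fake block device registry: paths are canonicalized with
 * rumpuser_realpath() and kept on a list which rump_fakeblk_find()
 * consults.
 */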
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}

void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	/*
	 * XXX: NFS holds a reference to the root vnode, so don't clean
	 * it out.  This is very wrong, but fixing it properly would
	 * take too much effort for now.
	 */
	if (vp->v_tag == VT_NFS) {
		mutex_exit(&vp->v_interlock);
		return;
	}
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

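/*
 * Allocate a single-iovec struct uio describing a read or write of
 * bufsize bytes at offset.  Free it with rump_uio_free(), which
 * returns the residual byte count.
 */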
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}

void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

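/*
 * Create an lwp.  For pid != 0 a new proc with its own cwdinfo and
 * file descriptor table is created around it; for pid 0 the lwp is
 * attached to proc0.  If set is nonzero, the lwp is installed as the
 * host thread's current lwp.  Undone by rump_clear_curlwp().
 */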
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	if (pid != 0) {
		p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
		p->p_cwdi = cwdinit();

		p->p_stats = &rump_stats;
		p->p_limit = &rump_limits;
		p->p_pid = pid;
		p->p_vmspace = &rump_vmspace;
		p->p_fd = fd_init(NULL);
	} else {
		p = &proc0;
	}

	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l->l_proc->p_pid != 0) {
		fd_free();
		cwdfree(l->l_proc->p_cwdi);
		kmem_free(l->l_proc, sizeof(*l->l_proc));
	}
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

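/*
 * spl emulation: rump_splfoo()/rump_splx() bracket "base level" code
 * and rump_intr_enter()/rump_intr_exit() bracket "interrupt" code.
 * The two classes exclude each other through the rumpspl host rwlock
 * and the rumpuser ipl markers.
 */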
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}

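/*
 * I/O completion callback for host-backed buffers: record the
 * residual count and error, then run biodone() at "interrupt" level.
 */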
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}

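/*
 * Default syspuffs_glueinit() to an ENODEV stub via a weak alias;
 * linking in the real syspuffs implementation overrides it.
 */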
int _syspuffs_stub(int, int *);
int
_syspuffs_stub(int fd, int *newfd)
{

	return ENODEV;
}

__weak_alias(syspuffs_glueinit,_syspuffs_stub);