/*	$NetBSD: rump.c,v 1.53 2008/08/08 14:40:07 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>
#include <sys/wapbl.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

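/*
 * Statically allocated stand-ins for kernel structures which are
 * normally set up during boot.
 */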
struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;
char machine[] = "rump";

kmutex_t rump_giantlock;

sigset_t sigcantmask;

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

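/*
 * Thread-context continuation for async i/o: hand a completed
 * buffer to its b_iodone callback.
 */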
#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* !RUMP_WITHOUT_THREADS */

static int rump_inited;
static struct emul emul_rump;

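/*
 * Bootstrap the minimal kernel environment: VM, the vnode cache,
 * proc0/lwp0, the VFS subsystems and (unless built with
 * RUMP_WITHOUT_THREADS) the aiodone workqueue.  Called once,
 * before any other rump interface is used.
 */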
void
rump_init(void)
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return;
	rump_inited = 1;

	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	rumpvm_init();
	rump_sleepers_init();
#ifdef RUMP_USE_REAL_KMEM
	kmem_init();
#endif

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	p->p_emul = &emul_rump;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rumpuser_thrinit();

	fd_sys_init();
	module_init();
	sysctl_init();
	vfsinit();
	bufinit();
	wapbl_init();

	rumpvfs_init();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* create the aiodone daemon workqueue ("aieeeedondest") */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* !RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}

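/*
 * Allocate and minimally initialize a struct mount for the given
 * vfsops, ready to be fed to rump_mnt_mount().
 */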
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}

int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

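/*
 * Example of the mount lifecycle (hypothetical caller; "vfsops" and
 * the file system specific argument structure "args" are assumptions):
 *
 *	mp = rump_mnt_init(vfsops, 0);
 *	error = rump_mnt_mount(mp, "/", &args, &argslen);
 *	...
 *	rump_vfs_unmount(mp, 0);
 *	rump_mnt_destroy(mp);
 */

/*
 * Construct a componentname to pass to VOP calls.  The result must
 * be released with rump_freecn().
 */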
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strlcpy(cnp->cn_pnbuf, name, MAXPATHLEN);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

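/*
 * Release a componentname obtained from rump_makecn().
 * RUMPCN_FREECRED also disposes of the credentials, while
 * RUMPCN_HASNTBUF signals that the pathname buffer was consumed
 * elsewhere and must not be freed here.
 */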
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if ((flags & RUMPCN_ISLOOKUP) ||
			    (cnp->cn_flags & SAVESTART))
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}

/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}

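/*
 * Example (hypothetical): look up a pathname and get back the locked
 * leaf vnode, caring about neither the parent nor the componentname:
 *
 *	rv = rump_namei(LOOKUP, LOCKLEAF, "/some/path", NULL, &vp, NULL);
 */

/*
 * fakeblk: registry of host file paths which may be treated as
 * block devices.
 */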
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}

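/*
 * Vnode accessors for clients which cannot (or should not)
 * dereference struct vnode themselves.
 */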
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init(void)
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

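/*
 * Direct v_usecount manipulation for clients which manage vnode
 * references themselves, e.g. p2k, where puffs in the host kernel
 * does the reference counting.
 */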
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}

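/*
 * Example (hypothetical): read nbytes at offset off from a locked
 * vnode into buf:
 *
 *	uio = rump_uio_setup(buf, nbytes, off, RUMPUIO_READ);
 *	rv = VOP_READ(vp, uio, 0, cred);
 *	resid = rump_uio_free(uio);
 */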
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}

void
rump_bioops_sync(void)
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

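/*
 * Create a struct lwp (and, for pid != 0, a private struct proc)
 * to represent a client thread.  If "set" is nonzero, the new lwp
 * is also installed as the implicit current lwp.
 */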
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	if (pid != 0) {
		p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
		p->p_cwdi = cwdinit();

		p->p_stats = &rump_stats;
		p->p_limit = &rump_limits;
		p->p_pid = pid;
		p->p_vmspace = &rump_vmspace;
		p->p_fd = fd_init(NULL);
	} else {
		p = &proc0;
	}

	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l->l_proc->p_pid != 0) {
		fd_free();
		cwdfree(l->l_proc->p_cwdi);
		kmem_free(l->l_proc, sizeof(*l->l_proc));
	}
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

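/*
 * Interrupt priority emulation: rump_splfoo()/rump_splx() and
 * "interrupt" context (see rump_biodone()) take opposite sides of
 * the rumpspl rwlock, so raising the spl excludes interrupt
 * handlers until the corresponding splx.
 */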
int
rump_splfoo(void)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}

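/*
 * I/O completion notification from the host: record the residual
 * count and error, then run biodone() in emulated interrupt context.
 */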
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}