/*	$NetBSD: rump.c,v 1.50 2008/08/01 14:47:28 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;
char machine[] = "rump";

kmutex_t rump_giantlock;

sigset_t sigcantmask;

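/*
 * Registry of host file paths which are treated as block devices.
 * Paths are stored in resolved (realpath) form so that different
 * spellings of the same path compare equal.
 */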
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* RUMP_WITHOUT_THREADS */

int rump_inited;

void
rump_init(void)
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return;
	rump_inited = 1;

	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rump_sleepers_init();
	rumpuser_thrinit();

	fd_sys_init();
	module_init();
	vfsinit();
	bufinit();
	wapbl_init();

	rumpvfs_init();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}
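
/*
 * Illustrative sketch of bootstrapping a process which uses the rump
 * kernel (not part of this file; what the consumer does after init is
 * an assumption).  rump_init() must run before any other interface
 * here; repeated calls are no-ops thanks to rump_inited.
 *
 *	int
 *	main(void)
 *	{
 *
 *		rump_init();
 *		... mount a file system, issue vnode ops, etc. ...
 *		return 0;
 *	}
 */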

struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}

int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}
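
/*
 * Example use of the mount interfaces above (an illustrative sketch;
 * the file system type "ffs" and the contents of its mount argument
 * structure are assumptions, not something this file provides):
 *
 *	struct mount *mp;
 *	struct ufs_args args;
 *	size_t alen = sizeof(args);
 *	int rv;
 *
 *	... fill in args ...
 *	mp = rump_mnt_init(rump_vfs_getopsbyname("ffs"), 0);
 *	rv = rump_mnt_mount(mp, "/", &args, &alen);
 *	if (rv)
 *		rump_mnt_destroy(mp);
 */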

void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	/* PNBUF_GET() returns a buffer of MAXPATHLEN bytes */
	strlcpy(cnp->cn_pnbuf, name, MAXPATHLEN);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||
			    cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}
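
/*
 * Sketch of pairing rump_makecn() with rump_freecn() around a lookup
 * (illustrative only; dvp is assumed to be a locked directory vnode
 * supplied by the caller):
 *
 *	struct componentname *cnp;
 *	struct vnode *vp;
 *	int rv;
 *
 *	cnp = rump_makecn(LOOKUP, 0, "file", 4, rump_cred, curlwp);
 *	rv = VOP_LOOKUP(dvp, &vp, cnp);
 *	rump_freecn(cnp, RUMPCN_ISLOOKUP);
 */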

/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
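
/*
 * Example lookup through rump_namei() (an illustrative sketch; the
 * path is arbitrary).  Since LOCKLEAF is given and dvpp is NULL, the
 * result comes back locked and LOCKPARENT must not be set:
 *
 *	struct vnode *vp;
 *	int rv;
 *
 *	rv = rump_namei(LOOKUP, FOLLOW | LOCKLEAF, "/some/path",
 *	    NULL, &vp, NULL);
 *	if (rv == 0)
 *		vput(vp);
 */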

static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}
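
/*
 * Sketch of exposing a host file as a fake block device, e.g. before
 * mounting a disk-based file system from an image (the image path is
 * an assumption):
 *
 *	rump_fakeblk_register("/tmp/ffs.img");
 *	... mount, using the image path as the device ...
 *	rump_fakeblk_deregister("/tmp/ffs.img");
 */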

void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init(void)
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}
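
/*
 * Sketch of a read through the uio helpers above (illustrative only;
 * vp is assumed to be a locked, referenced vnode).  rump_uio_free()
 * returns the residual, i.e. the number of bytes NOT transferred:
 *
 *	char buf[512];
 *	struct uio *uio;
 *	size_t resid;
 *	int rv;
 *
 *	uio = rump_uio_setup(buf, sizeof(buf), 0, RUMPUIO_READ);
 *	rv = VOP_READ(vp, uio, 0, rump_cred);
 *	resid = rump_uio_free(uio);
 */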

void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

int
rump_vfs_load(struct modinfo **mi)
{

	if (!module_compatible((*mi)->mi_version, __NetBSD_Version__))
		return EPROGMISMATCH;

	return (*mi)->mi_modcmd(MODULE_CMD_INIT, NULL);
}

void
rump_bioops_sync(void)
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
	p->p_cwdi = cwdinit();

	p->p_stats = &rump_stats;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	p->p_fd = fd_init(NULL);
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	fd_free();
	cwdfree(l->l_proc->p_cwdi);
	kmem_free(l->l_proc, sizeof(*l->l_proc));
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}
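
/*
 * Sketch of establishing and tearing down an implicit thread context
 * (illustrative only; the pid/lid values are arbitrary).  Note that
 * rump_clear_curlwp() frees the lwp and its proc:
 *
 *	struct lwp *l;
 *
 *	l = rump_setup_curlwp(1, 1, 1);
 *	... operate in the context of l ...
 *	rump_clear_curlwp();
 */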

struct lwp *
rump_get_curlwp(void)
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

int
rump_splfoo(void)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
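
/*
 * Sketch of the pseudo-spl interface above.  rump_splfoo() always
 * returns 0 and rump_splx() ignores its argument, but callers follow
 * the usual kernel spl idiom:
 *
 *	int s;
 *
 *	s = rump_splfoo();
 *	... section which must exclude rump "interrupts" ...
 *	rump_splx(s);
 */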

void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}