rump.c revision 1.48 1 /* $NetBSD: rump.c,v 1.48 2008/07/29 13:17:47 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/filedesc.h>
33 #include <sys/kauth.h>
34 #include <sys/kmem.h>
35 #include <sys/mount.h>
36 #include <sys/namei.h>
37 #include <sys/queue.h>
38 #include <sys/resourcevar.h>
39 #include <sys/select.h>
40 #include <sys/vnode.h>
41 #include <sys/vfs_syscalls.h>
42 #include <sys/module.h>
43
44 #include <miscfs/specfs/specdev.h>
45
46 #include <rump/rumpuser.h>
47
48 #include "rump_private.h"
49
/*
 * Stand-in kernel state for the rump environment: a minimal proc0/lwp0
 * context plus the supporting structures rump_init() wires together.
 */
struct proc proc0;
struct cwdinfo rump_cwdi;		/* cwd state shared by the implicit proc */
struct pstats rump_stats;		/* dummy stats backing p_stats */
struct plimit rump_limits;		/* resource limits; relaxed in rump_init() */
kauth_cred_t rump_cred = RUMPCRED_SUSER;	/* default superuser credential */
struct cpu_info rump_cpu;		/* the single virtual cpu */
struct filedesc rump_filedesc0;		/* file descriptor table for proc0 */
struct proclist allproc;
char machine[] = "rump";

kmutex_t rump_giantlock;		/* recursive "giant" lock, see rump_init() */

sigset_t sigcantmask;

/*
 * Registry entry for a host file faked as a block device.
 * Paths are stored canonicalized (rumpuser_realpath).
 */
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
70
#ifndef RUMP_WITHOUT_THREADS
/*
 * Workqueue handler for async i/o completion: the work item is
 * embedded in the buf, so recover the buf and run its iodone hook.
 */
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* !RUMP_WITHOUT_THREADS */
81
int rump_inited;	/* set once; guards against repeated init */

/*
 * Bootstrap the rump kernel: set up the implicit proc0/lwp0 context,
 * core locks, VM, file descriptors, module and VFS subsystems, and
 * (optionally) the aio completion workqueue.  Initialization order
 * matters: VM and locks before subsystems, VFS before rumpvfs_init().
 */
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX: simple flag only; concurrent first calls are not serialized */
	if (rump_inited)
		return;
	rump_inited = 1;

	/* vnode cache size: tunable via environment, default 64k */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	cache_cpu_init(&rump_cpu);
	rw_init(&rump_cwdi.cwdi_lock);

	/* wire proc0/lwp0 to the static stand-in structures */
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	/* no artificial limits inside the rump kernel */
	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rump_sleepers_init();
	rumpuser_thrinit();

	fd_sys_init();
	module_init();
	vfsinit();
	bufinit();

	rumpvfs_init();

	/* giantlock must be recursive, hence init'd via rumpuser directly */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* !RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	/* fd table and cwd depend on rootvnode set up by rumpvfs_init() */
	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}
158
/*
 * Allocate and initialize a struct mount for the given vfsops.
 * Caller owns the result (refcnt 1); release with rump_mnt_destroy().
 */
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}
178
/*
 * Mount and start a file system on mp.  On VFS_START failure the
 * mount is force-unmounted before returning the error, so the caller
 * never sees a half-started mount.
 */
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	/* cache statvfs info; failure here is deliberately ignored */
	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}
195
/*
 * Release a mount allocated by rump_mnt_init().  Caller must have
 * unmounted first; this only frees the structure itself.
 */
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}
203
204 struct componentname *
205 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
206 kauth_cred_t creds, struct lwp *l)
207 {
208 struct componentname *cnp;
209 const char *cp = NULL;
210
211 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
212
213 cnp->cn_nameiop = nameiop;
214 cnp->cn_flags = flags;
215
216 cnp->cn_pnbuf = PNBUF_GET();
217 strcpy(cnp->cn_pnbuf, name);
218 cnp->cn_nameptr = cnp->cn_pnbuf;
219 cnp->cn_namelen = namelen;
220 cnp->cn_hash = namei_hash(name, &cp);
221
222 cnp->cn_cred = creds;
223
224 return cnp;
225 }
226
227 void
228 rump_freecn(struct componentname *cnp, int flags)
229 {
230
231 if (flags & RUMPCN_FREECRED)
232 rump_cred_destroy(cnp->cn_cred);
233
234 if ((flags & RUMPCN_HASNTBUF) == 0) {
235 if (cnp->cn_flags & SAVENAME) {
236 if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
237 PNBUF_PUT(cnp->cn_pnbuf);
238 } else {
239 PNBUF_PUT(cnp->cn_pnbuf);
240 }
241 }
242 kmem_free(cnp, sizeof(*cnp));
243 }
244
245 /* hey baby, what's your namei? */
/*
 * namei() wrapper for rump consumers.  Any of dvpp/vpp/cnpp may be
 * NULL; the corresponding result is then released/ignored.  dvpp
 * requires LOCKPARENT in flags (asserted both ways).  A returned
 * cnpp is a kmem copy the caller must eventually free.
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		/* caller doesn't want the vnode: drop it (vput if locked) */
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody took ownership of the pathbuf: that's a caller bug */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
288
289 static struct fakeblk *
290 _rump_fakeblk_find(const char *path)
291 {
292 char buf[MAXPATHLEN];
293 struct fakeblk *fblk;
294 int error;
295
296 if (rumpuser_realpath(path, buf, &error) == NULL)
297 return NULL;
298
299 LIST_FOREACH(fblk, &fakeblks, entries)
300 if (strcmp(fblk->path, buf) == 0)
301 return fblk;
302
303 return NULL;
304 }
305
306 int
307 rump_fakeblk_register(const char *path)
308 {
309 char buf[MAXPATHLEN];
310 struct fakeblk *fblk;
311 int error;
312
313 if (_rump_fakeblk_find(path))
314 return EEXIST;
315
316 if (rumpuser_realpath(path, buf, &error) == NULL)
317 return error;
318
319 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
320 if (fblk == NULL)
321 return ENOMEM;
322
323 strlcpy(fblk->path, buf, MAXPATHLEN);
324 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
325
326 return 0;
327 }
328
329 int
330 rump_fakeblk_find(const char *path)
331 {
332
333 return _rump_fakeblk_find(path) != NULL;
334 }
335
336 void
337 rump_fakeblk_deregister(const char *path)
338 {
339 struct fakeblk *fblk;
340
341 fblk = _rump_fakeblk_find(path);
342 if (fblk == NULL)
343 return;
344
345 LIST_REMOVE(fblk, entries);
346 kmem_free(fblk, sizeof(*fblk));
347 }
348
349 void
350 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
351 {
352
353 *vtype = vp->v_type;
354 *vsize = vp->v_size;
355 if (vp->v_specnode)
356 *vdev = vp->v_rdev;
357 else
358 *vdev = 0;
359 }
360
361 struct vfsops *
362 rump_vfslist_iterate(struct vfsops *ops)
363 {
364
365 if (ops == NULL)
366 return LIST_FIRST(&vfs_list);
367 else
368 return LIST_NEXT(ops, vfs_list);
369 }
370
/*
 * Thin wrapper: look up a file system's vfsops by name.
 */
struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}
377
/*
 * Allocate a vattr initialized to "no value" (vattr_null).
 * Release with rump_vattr_free().
 */
struct vattr*
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}
388
/* Accessor: set the vnode type in a vattr. */
void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}
395
/* Accessor: set the file mode in a vattr. */
void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}
402
/* Accessor: set the device number in a vattr. */
void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}
409
/* Release a vattr allocated by rump_vattr_init(). */
void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}
416
/*
 * Bump the vnode use count under the interlock.  Raw counter
 * manipulation: deliberately bypasses vref()'s extra checks.
 */
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
425
/*
 * Return the current vnode use count.  Unlocked read: the value
 * may be stale by the time the caller sees it.
 */
int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}
432
/*
 * Drop the vnode use count under the interlock.  Unlike vrele(),
 * this never triggers inactivation/reclaim.
 */
void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}
441
/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/* force usecount to 1 so vclean/vrelel take the final-release path */
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	/* NOTE(review): no mutex_exit here — presumably vclean/vrelel
	 * consume the interlock; confirm against vfs_subr.c */
	vrelel(vp, 0);
}
457
/* Thin wrapper around vrele() for rump consumers. */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
464
465 struct uio *
466 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
467 {
468 struct uio *uio;
469 enum uio_rw uiorw;
470
471 switch (rw) {
472 case RUMPUIO_READ:
473 uiorw = UIO_READ;
474 break;
475 case RUMPUIO_WRITE:
476 uiorw = UIO_WRITE;
477 break;
478 default:
479 panic("%s: invalid rw %d", __func__, rw);
480 }
481
482 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
483 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
484
485 uio->uio_iov->iov_base = buf;
486 uio->uio_iov->iov_len = bufsize;
487
488 uio->uio_iovcnt = 1;
489 uio->uio_offset = offset;
490 uio->uio_resid = bufsize;
491 uio->uio_rw = uiorw;
492 uio->uio_vmspace = UIO_VMSPACE_SYS;
493
494 return uio;
495 }
496
/* Accessor: bytes not yet transferred in the uio. */
size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}
503
/* Accessor: current offset of the uio. */
off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}
510
511 size_t
512 rump_uio_free(struct uio *uio)
513 {
514 size_t resid;
515
516 resid = uio->uio_resid;
517 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
518 kmem_free(uio, sizeof(*uio));
519
520 return resid;
521 }
522
/* Take the vnode lock exclusively.  Direct VOP_LOCK: vn_lock()'s
 * extra gate-checking is not needed here. */
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}
530
/* Take the vnode lock shared. */
void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}
537
/* Release the vnode lock. */
void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}
544
/* Query the vnode lock state (see VOP_ISLOCKED for return values). */
int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}
551
/* Acquire the vnode interlock; caller is responsible for release. */
void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}
558
/* Thin wrapper around VFS_UNMOUNT. */
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}
565
/*
 * Fetch the root vnode of mp.  VFS_ROOT returns it locked; if the
 * caller passed lock == 0 we drop the lock before returning.
 */
int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int error;

	error = VFS_ROOT(mp, vpp);
	if (error == 0 && !lock)
		VOP_UNLOCK(*vpp, 0);

	return error;
}
580
/* Thin wrapper around VFS_STATVFS. */
int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}
587
588 int
589 rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
590 {
591
592 return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
593 }
594
/* Thin wrapper around VFS_FHTOVP (file handle to vnode). */
int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}
601
/* Thin wrapper around VFS_VPTOFH (vnode to file handle). */
int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}
608
/*ARGSUSED*/
/*
 * Wait for all buffers to be flushed; complain about any stragglers.
 * The mount argument is unused — buf_syncwait() is global.
 */
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}
619
/*
 * Run the registered bioops sync hook, if any (e.g. softdep-style
 * consumers register one via bioopsp).
 */
void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}
627
/*
 * Create a new proc+lwp pair with its own cwd and fd table, mirroring
 * the proc0 setup in rump_init().  If set is non-zero, also install
 * it as the current lwp.  Tear down with rump_clear_curlwp().
 */
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
	p->p_cwdi = cwdinit();

	/* shared stand-in state, same as proc0 */
	p->p_stats = &rump_stats;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	p->p_fd = fd_init(NULL);
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}
654
/*
 * Tear down the current lwp created by rump_setup_curlwp(): release
 * its fd table and cwd, free the proc and lwp, and clear curlwp.
 */
void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	fd_free();
	cwdfree(l->l_proc->p_cwdi);
	kmem_free(l->l_proc, sizeof(*l->l_proc));
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}
667
668 struct lwp *
669 rump_get_curlwp()
670 {
671 struct lwp *l;
672
673 l = rumpuser_get_curlwp();
674 if (l == NULL)
675 l = &lwp0;
676
677 return l;
678 }
679
/*
 * Raise the fake spl: take the spl rwlock as reader and mark the
 * SPLFOO level, unless we are already in interrupt context (which
 * holds the lock as writer).  Returns a dummy 0 for rump_splx().
 */
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}
691
/*
 * Enter interrupt context: mark IPL_INTR, then take the spl rwlock
 * as writer so spl "interrupts" are excluded while we run.
 */
static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}
699
/*
 * Leave interrupt context: release the spl rwlock and clear the
 * IPL_INTR marker (reverse order of rump_intr_enter()).
 */
static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}
707
/*
 * Undo rump_splfoo().  The argument (its return value) is unused;
 * in interrupt context this is a no-op, matching rump_splfoo().
 */
void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
717
/*
 * I/O completion callback from the rumpuser backend: record how much
 * of the request completed (count bytes done out of b_bcount) plus
 * any error, then run biodone() in (fake) interrupt context.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
731