/*	$NetBSD: rump.c,v 1.40 2008/03/21 21:55:01 ad Exp $	*/
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/filedesc.h>
33 #include <sys/kauth.h>
34 #include <sys/kmem.h>
35 #include <sys/mount.h>
36 #include <sys/namei.h>
37 #include <sys/queue.h>
38 #include <sys/resourcevar.h>
39 #include <sys/select.h>
40 #include <sys/vnode.h>
41 #include <sys/vfs_syscalls.h>
42
43 #include <miscfs/specfs/specdev.h>
44
45 #include "rump_private.h"
46 #include "rumpuser.h"
47
/*
 * Stand-in kernel globals for the rump "process 0" environment.
 * These replace the state normally set up by a full kernel boot.
 */
struct proc proc0;
struct cwdinfo rump_cwdi;		/* current working dir state for proc0 */
struct pstats rump_stats;		/* per-process statistics for proc0 */
struct plimit rump_limits;		/* resource limits; relaxed in rump_init() */
kauth_cred_t rump_cred = RUMPCRED_SUSER;	/* superuser credentials */
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;		/* file descriptor table for proc0 */
struct proclist allproc;

kmutex_t rump_giantlock;		/* recursive "giant" lock, see rump_init() */

sigset_t sigcantmask;

/*
 * Registry entry for a host file masquerading as a block device.
 * path holds the rumpuser_realpath()-canonicalized host path.
 */
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
67
#ifndef RUMP_WITHOUT_THREADS
/*
 * Workqueue handler for async i/o completion: recover the buf from the
 * embedded work item and invoke its completion callback.  The cast
 * assumes wk is the b_work member of the buf; the KASSERT checks that
 * the addresses agree.
 */
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* !RUMP_WITHOUT_THREADS */
78
/* nonzero once rump_init() has run; guards against re-initialization */
int rump_inited;

/*
 * Bring the rump kernel up: fake up proc0/lwp0 state, initialize the
 * VM, VFS, file descriptor and select subsystems, and start the aio
 * completion workqueue.  Safe to call more than once; only the first
 * call does anything.  Initialization order below is significant.
 */
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX: not locked; assumes the first call is single-threaded */
	if (rump_inited)
		return;
	rump_inited = 1;

	/* vnode cache size: user override via RUMP_NVNODES, else 65536 */
	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	/* wire up proc0/lwp0 with our faked-up globals */
	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	/* do not constrain file sizes or descriptor counts */
	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;		/* sync immediately, no delayed writes */
	dovfsusermount = 1;	/* allow non-root mounts */

	vfsinit();
	bufinit();
	fd_sys_init();
	selsysinit();

	rumpvfs_init();

	rump_sleepers_init();
	rumpuser_thrinit();

	/* giant lock must be recursive, hence the direct rumpuser init */
	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* !RUMP_WITHOUT_THREADS */

	/* inherit the host's hostname */
	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}
154
155 struct mount *
156 rump_mnt_init(struct vfsops *vfsops, int mntflags)
157 {
158 struct mount *mp;
159
160 mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);
161
162 mp->mnt_op = vfsops;
163 mp->mnt_flag = mntflags;
164 TAILQ_INIT(&mp->mnt_vnodelist);
165 rw_init(&mp->mnt_lock);
166 mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
167 mp->mnt_refcnt = 1;
168
169 mount_initspecific(mp);
170
171 return mp;
172 }
173
174 int
175 rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
176 {
177 int rv;
178
179 rv = VFS_MOUNT(mp, path, data, dlen);
180 if (rv)
181 return rv;
182
183 (void) VFS_STATVFS(mp, &mp->mnt_stat);
184 rv = VFS_START(mp, 0);
185 if (rv)
186 VFS_UNMOUNT(mp, MNT_FORCE);
187
188 return rv;
189 }
190
191 void
192 rump_mnt_destroy(struct mount *mp)
193 {
194
195 mount_finispecific(mp);
196 kmem_free(mp, sizeof(*mp));
197 }
198
199 struct componentname *
200 rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
201 kauth_cred_t creds, struct lwp *l)
202 {
203 struct componentname *cnp;
204 const char *cp = NULL;
205
206 cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);
207
208 cnp->cn_nameiop = nameiop;
209 cnp->cn_flags = flags;
210
211 cnp->cn_pnbuf = PNBUF_GET();
212 strcpy(cnp->cn_pnbuf, name);
213 cnp->cn_nameptr = cnp->cn_pnbuf;
214 cnp->cn_namelen = namelen;
215 cnp->cn_hash = namei_hash(name, &cp);
216
217 cnp->cn_cred = creds;
218
219 return cnp;
220 }
221
/*
 * Release a componentname created by rump_makecn() (or filled in by
 * namei).  "flags" is a mask of RUMPCN_* values describing which
 * pieces of the cn state the caller is transferring to us.
 */
void
rump_freecn(struct componentname *cnp, int flags)
{

	/* drop the credential reference if ownership was handed to us */
	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	/*
	 * Free the pathname buffer unless the lower layer already
	 * consumed it (RUMPCN_HASNTBUF).  With SAVENAME set the buffer
	 * outlives the VOP, so it is ours to free only for lookup
	 * operations or when SAVESTART was also requested.
	 * (precedence note: '&' binds tighter than '||' below)
	 */
	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}
239
/* hey baby, what's your namei? */
/*
 * Wrapper around kernel namei(): translate a pathname into vnodes.
 *
 * op/flags: namei operation and flags (LOCKPARENT, LOCKLEAF, ...).
 * dvpp:     if non-NULL receives the parent vnode; requires LOCKPARENT.
 * vpp:      if non-NULL receives the result vnode; otherwise any
 *           result vnode is released here.
 * cnpp:     if non-NULL receives a heap copy of the componentname
 *           (caller frees); otherwise namei must not have left a
 *           pathname buffer behind.
 *
 * Returns 0 or an errno from namei().
 */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
	struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	/* parent vnode is only produced (and locked) with LOCKPARENT */
	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		/* caller doesn't want it: unlock (if locked) and release */
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		/* hand back a copy; ownership of cn_pnbuf moves with it */
		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		/* nobody to own the pathbuf => it would leak */
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
283
284 static struct fakeblk *
285 _rump_fakeblk_find(const char *path)
286 {
287 char buf[MAXPATHLEN];
288 struct fakeblk *fblk;
289 int error;
290
291 if (rumpuser_realpath(path, buf, &error) == NULL)
292 return NULL;
293
294 LIST_FOREACH(fblk, &fakeblks, entries)
295 if (strcmp(fblk->path, buf) == 0)
296 return fblk;
297
298 return NULL;
299 }
300
301 int
302 rump_fakeblk_register(const char *path)
303 {
304 char buf[MAXPATHLEN];
305 struct fakeblk *fblk;
306 int error;
307
308 if (_rump_fakeblk_find(path))
309 return EEXIST;
310
311 if (rumpuser_realpath(path, buf, &error) == NULL)
312 return error;
313
314 fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
315 if (fblk == NULL)
316 return ENOMEM;
317
318 strlcpy(fblk->path, buf, MAXPATHLEN);
319 LIST_INSERT_HEAD(&fakeblks, fblk, entries);
320
321 return 0;
322 }
323
324 int
325 rump_fakeblk_find(const char *path)
326 {
327
328 return _rump_fakeblk_find(path) != NULL;
329 }
330
331 void
332 rump_fakeblk_deregister(const char *path)
333 {
334 struct fakeblk *fblk;
335
336 fblk = _rump_fakeblk_find(path);
337 if (fblk == NULL)
338 return;
339
340 LIST_REMOVE(fblk, entries);
341 kmem_free(fblk, sizeof(*fblk));
342 }
343
344 void
345 rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
346 {
347
348 *vtype = vp->v_type;
349 *vsize = vp->v_size;
350 if (vp->v_specnode)
351 *vdev = vp->v_rdev;
352 else
353 *vdev = 0;
354 }
355
356 struct vfsops *
357 rump_vfslist_iterate(struct vfsops *ops)
358 {
359
360 if (ops == NULL)
361 return LIST_FIRST(&vfs_list);
362 else
363 return LIST_NEXT(ops, vfs_list);
364 }
365
366 struct vfsops *
367 rump_vfs_getopsbyname(const char *name)
368 {
369
370 return vfs_getopsbyname(name);
371 }
372
373 struct vattr*
374 rump_vattr_init()
375 {
376 struct vattr *vap;
377
378 vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
379 vattr_null(vap);
380
381 return vap;
382 }
383
384 void
385 rump_vattr_settype(struct vattr *vap, enum vtype vt)
386 {
387
388 vap->va_type = vt;
389 }
390
391 void
392 rump_vattr_setmode(struct vattr *vap, mode_t mode)
393 {
394
395 vap->va_mode = mode;
396 }
397
398 void
399 rump_vattr_setrdev(struct vattr *vap, dev_t dev)
400 {
401
402 vap->va_rdev = dev;
403 }
404
405 void
406 rump_vattr_free(struct vattr *vap)
407 {
408
409 kmem_free(vap, sizeof(*vap));
410 }
411
412 void
413 rump_vp_incref(struct vnode *vp)
414 {
415
416 mutex_enter(&vp->v_interlock);
417 ++vp->v_usecount;
418 mutex_exit(&vp->v_interlock);
419 }
420
421 int
422 rump_vp_getref(struct vnode *vp)
423 {
424
425 return vp->v_usecount;
426 }
427
428 void
429 rump_vp_decref(struct vnode *vp)
430 {
431
432 mutex_enter(&vp->v_interlock);
433 --vp->v_usecount;
434 mutex_exit(&vp->v_interlock);
435 }
436
/*
 * Really really recycle with a cherry on top. We should be
 * extra-sure we can do this. For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	/*
	 * Force the use count to exactly 1 so the clean+release below
	 * fully reclaims the vnode regardless of prior references.
	 * vrelel() consumes the interlock acquired above.
	 */
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}
452
/* Release one reference to a vnode (plain vrele wrapper). */
void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
459
460 struct uio *
461 rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
462 {
463 struct uio *uio;
464 enum uio_rw uiorw;
465
466 switch (rw) {
467 case RUMPUIO_READ:
468 uiorw = UIO_READ;
469 break;
470 case RUMPUIO_WRITE:
471 uiorw = UIO_WRITE;
472 break;
473 default:
474 panic("%s: invalid rw %d", __func__, rw);
475 }
476
477 uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
478 uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);
479
480 uio->uio_iov->iov_base = buf;
481 uio->uio_iov->iov_len = bufsize;
482
483 uio->uio_iovcnt = 1;
484 uio->uio_offset = offset;
485 uio->uio_resid = bufsize;
486 uio->uio_rw = uiorw;
487 uio->uio_vmspace = UIO_VMSPACE_SYS;
488
489 return uio;
490 }
491
492 size_t
493 rump_uio_getresid(struct uio *uio)
494 {
495
496 return uio->uio_resid;
497 }
498
499 off_t
500 rump_uio_getoff(struct uio *uio)
501 {
502
503 return uio->uio_offset;
504 }
505
506 size_t
507 rump_uio_free(struct uio *uio)
508 {
509 size_t resid;
510
511 resid = uio->uio_resid;
512 kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
513 kmem_free(uio, sizeof(*uio));
514
515 return resid;
516 }
517
518 void
519 rump_vp_lock_exclusive(struct vnode *vp)
520 {
521
522 /* we can skip vn_lock() */
523 VOP_LOCK(vp, LK_EXCLUSIVE);
524 }
525
526 void
527 rump_vp_lock_shared(struct vnode *vp)
528 {
529
530 VOP_LOCK(vp, LK_SHARED);
531 }
532
533 void
534 rump_vp_unlock(struct vnode *vp)
535 {
536
537 VOP_UNLOCK(vp, 0);
538 }
539
540 int
541 rump_vp_islocked(struct vnode *vp)
542 {
543
544 return VOP_ISLOCKED(vp);
545 }
546
547 void
548 rump_vp_interlock(struct vnode *vp)
549 {
550
551 mutex_enter(&vp->v_interlock);
552 }
553
554 int
555 rump_vfs_unmount(struct mount *mp, int mntflags)
556 {
557
558 return VFS_UNMOUNT(mp, mntflags);
559 }
560
561 int
562 rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
563 {
564 int rv;
565
566 rv = VFS_ROOT(mp, vpp);
567 if (rv)
568 return rv;
569
570 if (!lock)
571 VOP_UNLOCK(*vpp, 0);
572
573 return 0;
574 }
575
576 int
577 rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
578 {
579
580 return VFS_STATVFS(mp, sbp);
581 }
582
583 int
584 rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
585 {
586
587 return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
588 }
589
590 int
591 rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
592 {
593
594 return VFS_FHTOVP(mp, fid, vpp);
595 }
596
597 int
598 rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
599 {
600
601 return VFS_VPTOFH(vp, fid, fidsize);
602 }
603
604 /*ARGSUSED*/
605 void
606 rump_vfs_syncwait(struct mount *mp)
607 {
608 int n;
609
610 n = buf_syncwait();
611 if (n)
612 printf("syncwait: unsynced buffers: %d\n", n);
613 }
614
615 void
616 rump_bioops_sync()
617 {
618
619 if (bioopsp)
620 bioopsp->io_sync(NULL);
621 }
622
/*
 * Create a fresh proc/lwp pair with the given pid/lid and, if "set"
 * is nonzero, install the lwp as the current lwp for this host thread.
 * Undo with rump_clear_curlwp().
 */
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
	p->p_cwdi = cwdinit();

	/* stats, limits and vmspace are shared globals, not per-proc */
	p->p_stats = &rump_stats;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	/*
	 * NOTE(review): fd_init() is passed the global rump_filedesc0
	 * here, seemingly sharing descriptor state across all rump
	 * processes -- confirm this is intentional.
	 */
	p->p_fd = fd_init(&rump_filedesc0);
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}
649
/*
 * Tear down the current lwp installed by rump_setup_curlwp(): release
 * its descriptor table and cwd state, free the proc and lwp, and clear
 * the host thread's curlwp pointer.  Teardown order matters: fd_free()
 * operates on the still-current lwp.
 */
void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	fd_free();
	cwdfree(l->l_proc->p_cwdi);
	kmem_free(l->l_proc, sizeof(*l->l_proc));
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}
662
663 struct lwp *
664 rump_get_curlwp()
665 {
666 struct lwp *l;
667
668 l = rumpuser_get_curlwp();
669 if (l == NULL)
670 l = &lwp0;
671
672 return l;
673 }
674
/*
 * spl emulation: normal code takes the rumpspl rwlock as reader and
 * marks itself at SPLFOO; interrupt-context code (rump_intr_enter)
 * takes it as writer, excluding all "spl-protected" sections.  The
 * IPL check lets interrupt context call splfoo/splx harmlessly.
 */
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	/* return value is a dummy spl cookie for rump_splx() */
	return 0;
}

/* Enter interrupt context: mark the IPL, then exclude spl sections. */
static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

/* Leave interrupt context; mirror image of rump_intr_enter(). */
static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

/* Undo rump_splfoo(); the cookie argument is unused. */
void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
712
/*
 * I/O completion callback: record the transfer result on the buf and
 * run biodone() bracketed by interrupt context, as the biodone path
 * expects to run at "interrupt level".
 *
 * arg:   the struct buf of the completed transfer.
 * count: bytes actually transferred (must not exceed b_bcount).
 * error: errno of the operation, 0 on success.
 */
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}
726