/*	$NetBSD: rump.c,v 1.46 2008/05/20 19:02:36 ad Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>
#include <sys/module.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;
char machine[] = "rump";

kmutex_t rump_giantlock;

sigset_t sigcantmask;

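/*
 * Registry of host file paths to be treated as (fake) block devices
 * by the rump kernel.
 */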
struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

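/*
 * Async I/O completion worker: hand finished requests to their
 * completion routines.
 */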
#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* RUMP_WITHOUT_THREADS */

int rump_inited;

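/*
 * Bootstrap the rump kernel: set up the implicit process/lwp context,
 * the VM and VFS subsystems and the async I/O completion workqueue.
 * Repeated calls are no-ops.
 */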
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return;
	rump_inited = 1;

	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	rump_cpu.ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT,
	    IPL_NONE);

	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rump_sleepers_init();
	rumpuser_thrinit();

	fd_sys_init();
	module_init();
	vfsinit();
	bufinit();

	rumpvfs_init();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}

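/*
 * Allocate and initialize a struct mount for the given file system
 * type.  Use rump_mnt_mount() to actually mount it.
 */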
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_unmounting);
	mutex_init(&mp->mnt_updating, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}

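/*
 * Mount a file system and start it.  If VFS_START() fails, the
 * mount is forcibly backed out.
 */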
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}

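/* Release a mount structure obtained from rump_mnt_init(). */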
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}

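/*
 * Construct a componentname for a vnode operation.  The result must
 * be released with rump_freecn().
 */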
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}

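/*
 * Release a componentname.  The flags tell whether to also free the
 * credentials and whether the pathname buffer is still attached.
 */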
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP || cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}

/* hey baby, what's your namei? */
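/*
 * Do a full path lookup.  The parent vnode, result vnode and the
 * componentname are returned only if the caller asks for them.
 */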
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}

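/* Look up a registered fake block device by canonicalized path. */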
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

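/*
 * Register a host file as a fake block device.  The path is
 * canonicalized so that all aliases map to the same entry.
 */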
int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}

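/*
 * Return the type, size and device number of a vnode.  The device
 * number is meaningful only for device vnodes.
 */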
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}

void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}

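/*
 * Allocate and fill in a single-iovec uio.  rump_uio_free() releases
 * it and returns the residual byte count.
 */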
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}

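/* Thin wrappers around the vnode locking operations. */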
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}

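/* Expose the VFS_* operations to rump clients. */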
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}

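/*
 * Create a process/lwp pair for the given pid/lid and optionally
 * install it as the implicit context of the calling host thread.
 */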
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
	p->p_cwdi = cwdinit();

	p->p_stats = &rump_stats;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	p->p_fd = fd_init(NULL);
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	fd_free();
	cwdfree(l->l_proc->p_cwdi);
	kmem_free(l->l_proc, sizeof(*l->l_proc));
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}

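/*
 * spl emulation: normal code and (emulated) interrupt context
 * exclude each other through the rumpspl lock.
 */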
680
681 int
682 rump_splfoo()
683 {
684
685 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
686 rumpuser_rw_enter(&rumpspl, 0);
687 rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
688 }
689
690 return 0;
691 }
692
693 static void
694 rump_intr_enter(void)
695 {
696
697 rumpuser_set_ipl(RUMPUSER_IPL_INTR);
698 rumpuser_rw_enter(&rumpspl, 1);
699 }
700
701 static void
702 rump_intr_exit(void)
703 {
704
705 rumpuser_rw_exit(&rumpspl);
706 rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
707 }
708
709 void
710 rump_splx(int dummy)
711 {
712
713 if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
714 rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
715 rumpuser_rw_exit(&rumpspl);
716 }
717 }
718
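/*
 * I/O completion callback for the host component: record the result
 * and run biodone() in emulated interrupt context.
 */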
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}