/*	$NetBSD: rump.c,v 1.43 2008/04/28 19:31:45 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"
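
/*
 * Statically allocated stand-ins for the kernel structures normally
 * created at boot: process 0, its credentials, cwd, resource limits,
 * file descriptor table and a single CPU.  Everything in the rump
 * kernel runs in the context of these unless a private lwp/proc is
 * set up with rump_setup_curlwp() below.
 */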
struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;

kmutex_t rump_giantlock;

sigset_t sigcantmask;

struct fakeblk {
	char path[MAXPATHLEN];
	LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);
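
/*
 * Worker for the aiodone workqueue: the work item is embedded in the
 * buffer, so recover the buffer from it and run its i/o completion
 * handler.
 */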
#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (struct buf *)wk;

	KASSERT(&bp->b_work == wk);
	bp->b_iodone(bp);
}
#endif /* !RUMP_WITHOUT_THREADS */

int rump_inited;
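
/*
 * Bootstrap the rump kernel: initialize enough kernel infrastructure
 * (locks, VM, VFS, buffer cache, file descriptors) that file systems
 * can be mounted and operated on from a userspace process.  Safe to
 * call more than once; subsequent calls are no-ops.
 */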
void
rump_init()
{
	extern char hostname[];
	extern size_t hostnamelen;
	extern kmutex_t rump_atomic_lock;
	char buf[256];
	struct proc *p;
	struct lwp *l;
	int error;

	/* XXX */
	if (rump_inited)
		return;
	rump_inited = 1;

	if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
		desiredvnodes = strtoul(buf, NULL, 10);
	} else {
		desiredvnodes = 1<<16;
	}

	rump_cpu.ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT,
	    IPL_NONE);

	rw_init(&rump_cwdi.cwdi_lock);
	l = &lwp0;
	p = &proc0;
	p->p_stats = &rump_stats;
	p->p_cwdi = &rump_cwdi;
	p->p_limit = &rump_limits;
	p->p_pid = 0;
	p->p_fd = &rump_filedesc0;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = 1;

	LIST_INSERT_HEAD(&allproc, p, p_list);

	mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
	rumpvm_init();

	rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

	syncdelay = 0;
	dovfsusermount = 1;

	rump_sleepers_init();
	rumpuser_thrinit();

	fd_sys_init();
	vfsinit();
	bufinit();

	rumpvfs_init();

	rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
	/* aieeeedondest */
	if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
	    rump_aiodone_worker, NULL, 0, 0, 0))
		panic("aiodoned");
#endif /* !RUMP_WITHOUT_THREADS */

	rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
	hostnamelen = strlen(hostname);

	sigemptyset(&sigcantmask);

	fd_init(&rump_filedesc0);
	rump_cwdi.cwdi_cdir = rootvnode;
}
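
/*
 * Allocate and minimally initialize a struct mount suitable for
 * passing to VFS_MOUNT(), without going through the full kernel
 * mount(2) path.
 */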
struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
	struct mount *mp;

	mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

	mp->mnt_op = vfsops;
	mp->mnt_flag = mntflags;
	TAILQ_INIT(&mp->mnt_vnodelist);
	rw_init(&mp->mnt_lock);
	mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
	mp->mnt_refcnt = 1;

	mount_initspecific(mp);

	return mp;
}
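
/*
 * Mount a file system onto mp: call the file system's mount routine,
 * prime mnt_stat with VFS_STATVFS() and start the file system with
 * VFS_START().  If starting fails, the mount is backed out with a
 * forced unmount.
 */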
int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
	int rv;

	rv = VFS_MOUNT(mp, path, data, dlen);
	if (rv)
		return rv;

	(void) VFS_STATVFS(mp, &mp->mnt_stat);
	rv = VFS_START(mp, 0);
	if (rv)
		VFS_UNMOUNT(mp, MNT_FORCE);

	return rv;
}
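
/*
 * A typical caller pairs the above with rump_mnt_destroy() roughly
 * like this (sketch only; "myfs_vfsops" and "args" stand for a real
 * file system's vfsops and mount arguments):
 *
 *	struct mount *mp;
 *	size_t alen = sizeof(args);
 *	int error;
 *
 *	mp = rump_mnt_init(&myfs_vfsops, 0);
 *	error = rump_mnt_mount(mp, "/", &args, &alen);
 *	...
 *	error = rump_vfs_unmount(mp, 0);
 *	rump_mnt_destroy(mp);
 */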
void
rump_mnt_destroy(struct mount *mp)
{

	mount_finispecific(mp);
	kmem_free(mp, sizeof(*mp));
}
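
/*
 * Construct a componentname for a kernel-internal namei-style lookup.
 * The name is copied into a freshly allocated pathname buffer, so the
 * caller's string need not outlive the componentname.
 */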
struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
	struct componentname *cnp;
	const char *cp = NULL;

	cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

	cnp->cn_nameiop = nameiop;
	cnp->cn_flags = flags;

	cnp->cn_pnbuf = PNBUF_GET();
	strcpy(cnp->cn_pnbuf, name);
	cnp->cn_nameptr = cnp->cn_pnbuf;
	cnp->cn_namelen = namelen;
	cnp->cn_hash = namei_hash(name, &cp);

	cnp->cn_cred = creds;

	return cnp;
}
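
/*
 * Release a componentname made with rump_makecn().  RUMPCN_FREECRED
 * additionally drops the credential reference; RUMPCN_HASNTBUF signals
 * that the pathname buffer is no longer owned by the componentname, so
 * it is freed here only when that flag is not set (honoring the
 * SAVENAME/SAVESTART semantics for lookups).
 */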
void
rump_freecn(struct componentname *cnp, int flags)
{

	if (flags & RUMPCN_FREECRED)
		rump_cred_destroy(cnp->cn_cred);

	if ((flags & RUMPCN_HASNTBUF) == 0) {
		if (cnp->cn_flags & SAVENAME) {
			if (flags & RUMPCN_ISLOOKUP ||
			    cnp->cn_flags & SAVESTART)
				PNBUF_PUT(cnp->cn_pnbuf);
		} else {
			PNBUF_PUT(cnp->cn_pnbuf);
		}
	}
	kmem_free(cnp, sizeof(*cnp));
}

/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
	struct nameidata nd;
	int rv;

	NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
	rv = namei(&nd);
	if (rv)
		return rv;

	if (dvpp) {
		KASSERT(flags & LOCKPARENT);
		*dvpp = nd.ni_dvp;
	} else {
		KASSERT((flags & LOCKPARENT) == 0);
	}

	if (vpp) {
		*vpp = nd.ni_vp;
	} else {
		if (nd.ni_vp) {
			if (flags & LOCKLEAF)
				vput(nd.ni_vp);
			else
				vrele(nd.ni_vp);
		}
	}

	if (cnpp) {
		struct componentname *cnp;

		cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
		memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
		*cnpp = cnp;
	} else if (nd.ni_cnd.cn_flags & HASBUF) {
		panic("%s: pathbuf mismatch", __func__);
	}

	return rv;
}
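
/*
 * The fakeblk registry remembers which host paths should be treated
 * as block devices.  Paths are canonicalized with the host's
 * realpath() before comparison, so different spellings of the same
 * path match.
 */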
static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return NULL;

	LIST_FOREACH(fblk, &fakeblks, entries)
		if (strcmp(fblk->path, buf) == 0)
			return fblk;

	return NULL;
}

int
rump_fakeblk_register(const char *path)
{
	char buf[MAXPATHLEN];
	struct fakeblk *fblk;
	int error;

	if (_rump_fakeblk_find(path))
		return EEXIST;

	if (rumpuser_realpath(path, buf, &error) == NULL)
		return error;

	fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
	if (fblk == NULL)
		return ENOMEM;

	strlcpy(fblk->path, buf, MAXPATHLEN);
	LIST_INSERT_HEAD(&fakeblks, fblk, entries);

	return 0;
}

int
rump_fakeblk_find(const char *path)
{

	return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
	struct fakeblk *fblk;

	fblk = _rump_fakeblk_find(path);
	if (fblk == NULL)
		return;

	LIST_REMOVE(fblk, entries);
	kmem_free(fblk, sizeof(*fblk));
}
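
/*
 * Export basic vnode information (type, size and, for device nodes,
 * the device number) without exposing struct vnode to the caller.
 */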
void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

	*vtype = vp->v_type;
	*vsize = vp->v_size;
	if (vp->v_specnode)
		*vdev = vp->v_rdev;
	else
		*vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

	if (ops == NULL)
		return LIST_FIRST(&vfs_list);
	else
		return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

	return vfs_getopsbyname(name);
}
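
/*
 * Accessors for struct vattr, for callers which cannot use the kernel
 * headers directly: allocate a null'd vattr and set individual fields
 * through small wrappers.
 */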
struct vattr *
rump_vattr_init()
{
	struct vattr *vap;

	vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
	vattr_null(vap);

	return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

	vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

	vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

	vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

	kmem_free(vap, sizeof(*vap));
}
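
/*
 * Manual vnode reference count manipulation, for users such as p2k
 * which manage vnode lifetimes themselves.  The interlock is held
 * while the count is adjusted.
 */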
void
rump_vp_incref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	++vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

	return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	--vp->v_usecount;
	mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

	vrele(vp);
}
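
/*
 * Build a single-iovec struct uio describing a transfer of bufsize
 * bytes at the given offset.  The residual count and offset can be
 * queried while the uio is live; rump_uio_free() returns the final
 * residual, i.e. the number of bytes *not* transferred.
 */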
struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
	struct uio *uio;
	enum uio_rw uiorw;

	switch (rw) {
	case RUMPUIO_READ:
		uiorw = UIO_READ;
		break;
	case RUMPUIO_WRITE:
		uiorw = UIO_WRITE;
		break;
	default:
		panic("%s: invalid rw %d", __func__, rw);
	}

	uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
	uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

	uio->uio_iov->iov_base = buf;
	uio->uio_iov->iov_len = bufsize;

	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = bufsize;
	uio->uio_rw = uiorw;
	uio->uio_vmspace = UIO_VMSPACE_SYS;

	return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

	return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

	return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
	size_t resid;

	resid = uio->uio_resid;
	kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
	kmem_free(uio, sizeof(*uio));

	return resid;
}
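
/*
 * Thin wrappers around the vnode lock operations; plain VOP_LOCK()
 * suffices here, so the extra checks in vn_lock() are skipped.
 */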
void
rump_vp_lock_exclusive(struct vnode *vp)
{

	/* we can skip vn_lock() */
	VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

	VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

	return VOP_ISLOCKED(vp);
}

void
rump_vp_interlock(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
}
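
/*
 * Function versions of the VFS_*() operations, exported for use by
 * rump clients.
 */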
int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

	return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
	int rv;

	rv = VFS_ROOT(mp, vpp);
	if (rv)
		return rv;

	if (!lock)
		VOP_UNLOCK(*vpp, 0);

	return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

	return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

	return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

	return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

	return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
	int n;

	n = buf_syncwait();
	if (n)
		printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync()
{

	if (bioopsp)
		bioopsp->io_sync(NULL);
}
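
/*
 * Create a fresh lwp/proc pair for the calling host thread, with its
 * own cwd and file descriptor table, and optionally install it as the
 * current lwp.  rump_clear_curlwp() tears the pair down again; lwp0
 * is used when no private context has been set up.
 */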
struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
	struct lwp *l;
	struct proc *p;

	l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
	p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
	p->p_cwdi = cwdinit();

	p->p_stats = &rump_stats;
	p->p_limit = &rump_limits;
	p->p_pid = pid;
	p->p_vmspace = &rump_vmspace;
	l->l_cred = rump_cred;
	l->l_proc = p;
	l->l_lid = lid;

	p->p_fd = fd_init(NULL);
	l->l_fd = p->p_fd;

	if (set)
		rumpuser_set_curlwp(l);

	return l;
}

void
rump_clear_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	fd_free();
	cwdfree(l->l_proc->p_cwdi);
	kmem_free(l->l_proc, sizeof(*l->l_proc));
	kmem_free(l, sizeof(*l));
	rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	if (l == NULL)
		l = &lwp0;

	return l;
}
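
/*
 * Emulate spl(9) with a reader/writer lock: rump_splfoo() raises the
 * priority level by taking rumpspl shared, while "interrupt" delivery
 * takes it exclusively, so interrupts are held off while any thread
 * has the priority raised.  rump_splx() undoes rump_splfoo().
 */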
int
rump_splfoo()
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_rw_enter(&rumpspl, 0);
		rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
	}

	return 0;
}

static void
rump_intr_enter(void)
{

	rumpuser_set_ipl(RUMPUSER_IPL_INTR);
	rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

	rumpuser_rw_exit(&rumpspl);
	rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

	if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
		rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
		rumpuser_rw_exit(&rumpspl);
	}
}
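
/*
 * I/O completion callback: record how much of the request completed
 * and finish the buffer in emulated interrupt context, the way a
 * device driver's interrupt handler would.
 */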
void
rump_biodone(void *arg, size_t count, int error)
{
	struct buf *bp = arg;

	bp->b_resid = bp->b_bcount - count;
	KASSERT(bp->b_resid >= 0);
	bp->b_error = error;

	rump_intr_enter();
	biodone(bp);
	rump_intr_exit();
}