/*	$NetBSD: rump.c,v 1.44 2008/05/04 12:52:58 ad Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/select.h>
#include <sys/vnode.h>
#include <sys/vfs_syscalls.h>
#include <sys/module.h>

#include <miscfs/specfs/specdev.h>

#include "rump_private.h"
#include "rumpuser.h"

struct proc proc0;
struct cwdinfo rump_cwdi;
struct pstats rump_stats;
struct plimit rump_limits;
kauth_cred_t rump_cred = RUMPCRED_SUSER;
struct cpu_info rump_cpu;
struct filedesc rump_filedesc0;
struct proclist allproc;

kmutex_t rump_giantlock;

sigset_t sigcantmask;

struct fakeblk {
        char path[MAXPATHLEN];
        LIST_ENTRY(fakeblk) entries;
};

static LIST_HEAD(, fakeblk) fakeblks = LIST_HEAD_INITIALIZER(fakeblks);

#ifndef RUMP_WITHOUT_THREADS
static void
rump_aiodone_worker(struct work *wk, void *dummy)
{
        struct buf *bp = (struct buf *)wk;

        KASSERT(&bp->b_work == wk);
        bp->b_iodone(bp);
}
#endif /* !RUMP_WITHOUT_THREADS */

int rump_inited;

void
rump_init(void)
{
        extern char hostname[];
        extern size_t hostnamelen;
        extern kmutex_t rump_atomic_lock;
        char buf[256];
        struct proc *p;
        struct lwp *l;
        int error;

        /* XXX */
        if (rump_inited)
                return;
        rump_inited = 1;

        if (rumpuser_getenv("RUMP_NVNODES", buf, sizeof(buf), &error) == 0) {
                desiredvnodes = strtoul(buf, NULL, 10);
        } else {
                desiredvnodes = 1<<16;
        }

        rump_cpu.ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT,
            IPL_NONE);

        rw_init(&rump_cwdi.cwdi_lock);
        l = &lwp0;
        p = &proc0;
        p->p_stats = &rump_stats;
        p->p_cwdi = &rump_cwdi;
        p->p_limit = &rump_limits;
        p->p_pid = 0;
        p->p_fd = &rump_filedesc0;
        p->p_vmspace = &rump_vmspace;
        l->l_cred = rump_cred;
        l->l_proc = p;
        l->l_lid = 1;

        LIST_INSERT_HEAD(&allproc, p, p_list);

        mutex_init(&rump_atomic_lock, MUTEX_DEFAULT, IPL_NONE);
        rumpvm_init();

        rump_limits.pl_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
        rump_limits.pl_rlimit[RLIMIT_NOFILE].rlim_cur = RLIM_INFINITY;

        syncdelay = 0;
        dovfsusermount = 1;

        rump_sleepers_init();
        rumpuser_thrinit();

        fd_sys_init();
        module_init();
        vfsinit();
        bufinit();

        rumpvfs_init();

        rumpuser_mutex_recursive_init(&rump_giantlock.kmtx_mtx);

#ifndef RUMP_WITHOUT_THREADS
        /* create a workqueue for processing async i/o completions */
        if (workqueue_create(&uvm.aiodone_queue, "aiodoned",
            rump_aiodone_worker, NULL, 0, 0, 0))
                panic("aiodoned");
#endif /* !RUMP_WITHOUT_THREADS */

        rumpuser_gethostname(hostname, MAXHOSTNAMELEN, &error);
        hostnamelen = strlen(hostname);

        sigemptyset(&sigcantmask);

        fd_init(&rump_filedesc0);
        rump_cwdi.cwdi_cdir = rootvnode;
}
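
/*
 * Usage sketch: a client initializes the rump kernel once before
 * calling anything else.  RUMP_NVNODES is read from the host
 * environment, so tuning it is a host-side setenv(3) call; the value
 * below is illustrative.
 *
 *        setenv("RUMP_NVNODES", "4096", 1);
 *        rump_init();
 */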

struct mount *
rump_mnt_init(struct vfsops *vfsops, int mntflags)
{
        struct mount *mp;

        mp = kmem_zalloc(sizeof(struct mount), KM_SLEEP);

        mp->mnt_op = vfsops;
        mp->mnt_flag = mntflags;
        TAILQ_INIT(&mp->mnt_vnodelist);
        rw_init(&mp->mnt_lock);
        mutex_init(&mp->mnt_renamelock, MUTEX_DEFAULT, IPL_NONE);
        mp->mnt_refcnt = 1;

        mount_initspecific(mp);

        return mp;
}

int
rump_mnt_mount(struct mount *mp, const char *path, void *data, size_t *dlen)
{
        int rv;

        rv = VFS_MOUNT(mp, path, data, dlen);
        if (rv)
                return rv;

        (void) VFS_STATVFS(mp, &mp->mnt_stat);
        rv = VFS_START(mp, 0);
        if (rv)
                VFS_UNMOUNT(mp, MNT_FORCE);

        return rv;
}
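
/*
 * Usage sketch for the mount lifecycle above; "tmpfs" and the args
 * structure are illustrative assumptions, not the only possibility:
 *
 *        struct tmpfs_args args;
 *        size_t alen = sizeof(args);
 *        struct mount *mp;
 *        int error;
 *
 *        memset(&args, 0, sizeof(args));
 *        args.ta_version = TMPFS_ARGS_VERSION;
 *        mp = rump_mnt_init(rump_vfs_getopsbyname("tmpfs"), 0);
 *        error = rump_mnt_mount(mp, "/", &args, &alen);
 *        if (error)
 *                rump_mnt_destroy(mp);
 */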

void
rump_mnt_destroy(struct mount *mp)
{

        mount_finispecific(mp);
        kmem_free(mp, sizeof(*mp));
}

struct componentname *
rump_makecn(u_long nameiop, u_long flags, const char *name, size_t namelen,
    kauth_cred_t creds, struct lwp *l)
{
        struct componentname *cnp;
        const char *cp = NULL;

        cnp = kmem_zalloc(sizeof(struct componentname), KM_SLEEP);

        cnp->cn_nameiop = nameiop;
        cnp->cn_flags = flags;

        /* pathname buffers are MAXPATHLEN bytes; bound the copy */
        cnp->cn_pnbuf = PNBUF_GET();
        strlcpy(cnp->cn_pnbuf, name, MAXPATHLEN);
        cnp->cn_nameptr = cnp->cn_pnbuf;
        cnp->cn_namelen = namelen;
        cnp->cn_hash = namei_hash(name, &cp);

        cnp->cn_cred = creds;

        return cnp;
}
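
/*
 * Usage sketch: pair rump_makecn() with rump_freecn().  dvp is
 * assumed to be a suitably locked directory vnode:
 *
 *        struct componentname *cnp;
 *        struct vnode *vp = NULL;
 *
 *        cnp = rump_makecn(LOOKUP, 0, "file", strlen("file"),
 *            rump_cred, curlwp);
 *        error = VOP_LOOKUP(dvp, &vp, cnp);
 *        rump_freecn(cnp, RUMPCN_ISLOOKUP);
 */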

void
rump_freecn(struct componentname *cnp, int flags)
{

        if (flags & RUMPCN_FREECRED)
                rump_cred_destroy(cnp->cn_cred);

        if ((flags & RUMPCN_HASNTBUF) == 0) {
                if (cnp->cn_flags & SAVENAME) {
                        if ((flags & RUMPCN_ISLOOKUP) ||
                            (cnp->cn_flags & SAVESTART))
                                PNBUF_PUT(cnp->cn_pnbuf);
                } else {
                        PNBUF_PUT(cnp->cn_pnbuf);
                }
        }
        kmem_free(cnp, sizeof(*cnp));
}

/* hey baby, what's your namei? */
int
rump_namei(uint32_t op, uint32_t flags, const char *namep,
    struct vnode **dvpp, struct vnode **vpp, struct componentname **cnpp)
{
        struct nameidata nd;
        int rv;

        NDINIT(&nd, op, flags, UIO_SYSSPACE, namep);
        rv = namei(&nd);
        if (rv)
                return rv;

        if (dvpp) {
                KASSERT(flags & LOCKPARENT);
                *dvpp = nd.ni_dvp;
        } else {
                KASSERT((flags & LOCKPARENT) == 0);
        }

        if (vpp) {
                *vpp = nd.ni_vp;
        } else {
                if (nd.ni_vp) {
                        if (flags & LOCKLEAF)
                                vput(nd.ni_vp);
                        else
                                vrele(nd.ni_vp);
                }
        }

        if (cnpp) {
                struct componentname *cnp;

                cnp = kmem_alloc(sizeof(*cnp), KM_SLEEP);
                memcpy(cnp, &nd.ni_cnd, sizeof(*cnp));
                *cnpp = cnp;
        } else if (nd.ni_cnd.cn_flags & HASBUF) {
                panic("%s: pathbuf mismatch", __func__);
        }

        return rv;
}
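
/*
 * Usage sketch: resolve a path to a locked vnode and release it when
 * done ("/some/path" is illustrative):
 *
 *        struct vnode *vp;
 *        int error;
 *
 *        error = rump_namei(LOOKUP, FOLLOW | LOCKLEAF, "/some/path",
 *            NULL, &vp, NULL);
 *        if (error == 0)
 *                vput(vp);
 */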

static struct fakeblk *
_rump_fakeblk_find(const char *path)
{
        char buf[MAXPATHLEN];
        struct fakeblk *fblk;
        int error;

        if (rumpuser_realpath(path, buf, &error) == NULL)
                return NULL;

        LIST_FOREACH(fblk, &fakeblks, entries)
                if (strcmp(fblk->path, buf) == 0)
                        return fblk;

        return NULL;
}

int
rump_fakeblk_register(const char *path)
{
        char buf[MAXPATHLEN];
        struct fakeblk *fblk;
        int error;

        if (_rump_fakeblk_find(path))
                return EEXIST;

        if (rumpuser_realpath(path, buf, &error) == NULL)
                return error;

        fblk = kmem_alloc(sizeof(struct fakeblk), KM_NOSLEEP);
        if (fblk == NULL)
                return ENOMEM;

        strlcpy(fblk->path, buf, MAXPATHLEN);
        LIST_INSERT_HEAD(&fakeblks, fblk, entries);

        return 0;
}

int
rump_fakeblk_find(const char *path)
{

        return _rump_fakeblk_find(path) != NULL;
}

void
rump_fakeblk_deregister(const char *path)
{
        struct fakeblk *fblk;

        fblk = _rump_fakeblk_find(path);
        if (fblk == NULL)
                return;

        LIST_REMOVE(fblk, entries);
        kmem_free(fblk, sizeof(*fblk));
}
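
/*
 * Usage sketch: expose a host image file as a fake block device for
 * the duration of a mount ("/tmp/ffs.img" is illustrative):
 *
 *        if (rump_fakeblk_register("/tmp/ffs.img") == 0) {
 *                ... mount and use the image ...
 *                rump_fakeblk_deregister("/tmp/ffs.img");
 *        }
 */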

void
rump_getvninfo(struct vnode *vp, enum vtype *vtype, voff_t *vsize, dev_t *vdev)
{

        *vtype = vp->v_type;
        *vsize = vp->v_size;
        if (vp->v_specnode)
                *vdev = vp->v_rdev;
        else
                *vdev = 0;
}

struct vfsops *
rump_vfslist_iterate(struct vfsops *ops)
{

        if (ops == NULL)
                return LIST_FIRST(&vfs_list);
        else
                return LIST_NEXT(ops, vfs_list);
}

struct vfsops *
rump_vfs_getopsbyname(const char *name)
{

        return vfs_getopsbyname(name);
}

struct vattr *
rump_vattr_init(void)
{
        struct vattr *vap;

        vap = kmem_alloc(sizeof(struct vattr), KM_SLEEP);
        vattr_null(vap);

        return vap;
}

void
rump_vattr_settype(struct vattr *vap, enum vtype vt)
{

        vap->va_type = vt;
}

void
rump_vattr_setmode(struct vattr *vap, mode_t mode)
{

        vap->va_mode = mode;
}

void
rump_vattr_setrdev(struct vattr *vap, dev_t dev)
{

        vap->va_rdev = dev;
}

void
rump_vattr_free(struct vattr *vap)
{

        kmem_free(vap, sizeof(*vap));
}

void
rump_vp_incref(struct vnode *vp)
{

        mutex_enter(&vp->v_interlock);
        ++vp->v_usecount;
        mutex_exit(&vp->v_interlock);
}

int
rump_vp_getref(struct vnode *vp)
{

        return vp->v_usecount;
}

void
rump_vp_decref(struct vnode *vp)
{

        mutex_enter(&vp->v_interlock);
        --vp->v_usecount;
        mutex_exit(&vp->v_interlock);
}

/*
 * Really really recycle with a cherry on top.  We should be
 * extra-sure we can do this.  For example, with p2k there is
 * no problem, since puffs in the kernel takes care of refcounting
 * for us.
 */
void
rump_vp_recycle_nokidding(struct vnode *vp)
{

        mutex_enter(&vp->v_interlock);
        vp->v_usecount = 1;
        vclean(vp, DOCLOSE);
        vrelel(vp, 0);
}

void
rump_vp_rele(struct vnode *vp)
{

        vrele(vp);
}
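
/*
 * Usage sketch: hold an extra reference over an operation that might
 * otherwise let the vnode be recycled, and drop it afterwards:
 *
 *        rump_vp_incref(vp);
 *        ... operate on vp ...
 *        rump_vp_decref(vp);
 */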

struct uio *
rump_uio_setup(void *buf, size_t bufsize, off_t offset, enum rump_uiorw rw)
{
        struct uio *uio;
        enum uio_rw uiorw;

        switch (rw) {
        case RUMPUIO_READ:
                uiorw = UIO_READ;
                break;
        case RUMPUIO_WRITE:
                uiorw = UIO_WRITE;
                break;
        default:
                panic("%s: invalid rw %d", __func__, rw);
        }

        uio = kmem_alloc(sizeof(struct uio), KM_SLEEP);
        uio->uio_iov = kmem_alloc(sizeof(struct iovec), KM_SLEEP);

        uio->uio_iov->iov_base = buf;
        uio->uio_iov->iov_len = bufsize;

        uio->uio_iovcnt = 1;
        uio->uio_offset = offset;
        uio->uio_resid = bufsize;
        uio->uio_rw = uiorw;
        uio->uio_vmspace = UIO_VMSPACE_SYS;

        return uio;
}

size_t
rump_uio_getresid(struct uio *uio)
{

        return uio->uio_resid;
}

off_t
rump_uio_getoff(struct uio *uio)
{

        return uio->uio_offset;
}

size_t
rump_uio_free(struct uio *uio)
{
        size_t resid;

        resid = uio->uio_resid;
        kmem_free(uio->uio_iov, sizeof(*uio->uio_iov));
        kmem_free(uio, sizeof(*uio));

        return resid;
}
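
/*
 * Usage sketch: read from a locked vnode through a temporary uio; the
 * residual returned by rump_uio_free() tells how many bytes were left
 * untransferred:
 *
 *        char buf[512];
 *        struct uio *uio;
 *        size_t resid;
 *
 *        uio = rump_uio_setup(buf, sizeof(buf), 0, RUMPUIO_READ);
 *        error = VOP_READ(vp, uio, 0, rump_cred);
 *        resid = rump_uio_free(uio);
 */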

void
rump_vp_lock_exclusive(struct vnode *vp)
{

        /* we can skip vn_lock() */
        VOP_LOCK(vp, LK_EXCLUSIVE);
}

void
rump_vp_lock_shared(struct vnode *vp)
{

        VOP_LOCK(vp, LK_SHARED);
}

void
rump_vp_unlock(struct vnode *vp)
{

        VOP_UNLOCK(vp, 0);
}

int
rump_vp_islocked(struct vnode *vp)
{

        return VOP_ISLOCKED(vp);
}
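
/*
 * Usage sketch: the lock calls pair in the usual vnode fashion:
 *
 *        rump_vp_lock_exclusive(vp);
 *        KASSERT(rump_vp_islocked(vp));
 *        ... modify vp ...
 *        rump_vp_unlock(vp);
 */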

void
rump_vp_interlock(struct vnode *vp)
{

        mutex_enter(&vp->v_interlock);
}

int
rump_vfs_unmount(struct mount *mp, int mntflags)
{

        return VFS_UNMOUNT(mp, mntflags);
}

int
rump_vfs_root(struct mount *mp, struct vnode **vpp, int lock)
{
        int rv;

        rv = VFS_ROOT(mp, vpp);
        if (rv)
                return rv;

        if (!lock)
                VOP_UNLOCK(*vpp, 0);

        return 0;
}

int
rump_vfs_statvfs(struct mount *mp, struct statvfs *sbp)
{

        return VFS_STATVFS(mp, sbp);
}

int
rump_vfs_sync(struct mount *mp, int wait, kauth_cred_t cred)
{

        return VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, cred);
}

int
rump_vfs_fhtovp(struct mount *mp, struct fid *fid, struct vnode **vpp)
{

        return VFS_FHTOVP(mp, fid, vpp);
}

int
rump_vfs_vptofh(struct vnode *vp, struct fid *fid, size_t *fidsize)
{

        return VFS_VPTOFH(vp, fid, fidsize);
}

/*ARGSUSED*/
void
rump_vfs_syncwait(struct mount *mp)
{
        int n;

        n = buf_syncwait();
        if (n)
                printf("syncwait: unsynced buffers: %d\n", n);
}

void
rump_bioops_sync(void)
{

        if (bioopsp)
                bioopsp->io_sync(NULL);
}

struct lwp *
rump_setup_curlwp(pid_t pid, lwpid_t lid, int set)
{
        struct lwp *l;
        struct proc *p;

        l = kmem_zalloc(sizeof(struct lwp), KM_SLEEP);
        p = kmem_zalloc(sizeof(struct proc), KM_SLEEP);
        p->p_cwdi = cwdinit();

        p->p_stats = &rump_stats;
        p->p_limit = &rump_limits;
        p->p_pid = pid;
        p->p_vmspace = &rump_vmspace;
        l->l_cred = rump_cred;
        l->l_proc = p;
        l->l_lid = lid;

        p->p_fd = fd_init(NULL);
        l->l_fd = p->p_fd;

        if (set)
                rumpuser_set_curlwp(l);

        return l;
}
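
/*
 * Usage sketch: give a host thread its own lwp/proc context and tear
 * it down when the thread is done with the rump kernel (the pid/lid
 * values are illustrative):
 *
 *        struct lwp *l;
 *
 *        l = rump_setup_curlwp(2, 1, 1);
 *        ... issue rump operations as this lwp ...
 *        rump_clear_curlwp();
 */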

void
rump_clear_curlwp(void)
{
        struct lwp *l;

        l = rumpuser_get_curlwp();
        fd_free();
        cwdfree(l->l_proc->p_cwdi);
        kmem_free(l->l_proc, sizeof(*l->l_proc));
        kmem_free(l, sizeof(*l));
        rumpuser_set_curlwp(NULL);
}

struct lwp *
rump_get_curlwp(void)
{
        struct lwp *l;

        l = rumpuser_get_curlwp();
        if (l == NULL)
                l = &lwp0;

        return l;
}

int
rump_splfoo(void)
{

        if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
                rumpuser_rw_enter(&rumpspl, 0);
                rumpuser_set_ipl(RUMPUSER_IPL_SPLFOO);
        }

        return 0;
}

static void
rump_intr_enter(void)
{

        rumpuser_set_ipl(RUMPUSER_IPL_INTR);
        rumpuser_rw_enter(&rumpspl, 1);
}

static void
rump_intr_exit(void)
{

        rumpuser_rw_exit(&rumpspl);
        rumpuser_clear_ipl(RUMPUSER_IPL_INTR);
}

void
rump_splx(int dummy)
{

        if (rumpuser_whatis_ipl() != RUMPUSER_IPL_INTR) {
                rumpuser_clear_ipl(RUMPUSER_IPL_SPLFOO);
                rumpuser_rw_exit(&rumpspl);
        }
}
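
/*
 * Usage sketch: rump_splfoo()/rump_splx() bracket a section that must
 * exclude rump "interrupt" activity, mirroring the kernel spl idiom:
 *
 *        int s;
 *
 *        s = rump_splfoo();
 *        ... section protected from interrupt context ...
 *        rump_splx(s);
 */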

void
rump_biodone(void *arg, size_t count, int error)
{
        struct buf *bp = arg;

        bp->b_resid = bp->b_bcount - count;
        KASSERT(bp->b_resid >= 0);
        bp->b_error = error;

        rump_intr_enter();
        biodone(bp);
        rump_intr_exit();
}