/*	$NetBSD: rumpfs.c,v 1.25 2009/10/07 09:17:54 pooka Exp $	*/

/*
 * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by Google Summer of Code.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.25 2009/10/07 09:17:54 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/syscallargs.h>
#include <sys/vnode.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

static int rump_vop_lookup(void *);
static int rump_vop_getattr(void *);
static int rump_vop_mkdir(void *);
static int rump_vop_mknod(void *);
static int rump_vop_inactive(void *);
static int rump_vop_reclaim(void *);
static int rump_vop_success(void *);
static int rump_vop_spec(void *);
static int rump_vop_read(void *);
static int rump_vop_write(void *);
static int rump_vop_open(void *);

int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };

int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_access_desc, rump_vop_success },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_putpages_desc, genfs_null_putpages },
	{ &vop_fsync_desc, rump_vop_success },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };

int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };

const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};

struct rumpfs_dent {
	char *rd_name;
	struct rumpfs_node *rd_node;

	LIST_ENTRY(rumpfs_dent) rd_entries;
};

struct rumpfs_node {
	struct vattr rn_va;
	struct vnode *rn_vp;

	union {
		struct {
			char *hostpath;		/* VREG */
			int readfd;
			int writefd;
		} reg;
		LIST_HEAD(, rumpfs_dent) dir;	/* VDIR */
	} rn_u;
};
#define rn_hostpath	rn_u.reg.hostpath
#define rn_readfd	rn_u.reg.readfd
#define rn_writefd	rn_u.reg.writefd
#define rn_dir		rn_u.dir

static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t, const char *);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */
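/*
 * For example (illustrative only; the key and path below are made up),
 * a rump client could back a block device key with a host image file:
 *
 *	rump_etfs_register("/dev/hda1", "./disk.img", RUMP_ETFS_BLK);
 *
 * after which a lookup of "/dev/hda1" from the rumpfs root returns a
 * spec vnode whose I/O is routed to ./disk.img via rumpblk.
 */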

struct etfs {
	char et_key[MAXPATHLEN];
	size_t et_keylen;

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;
};
static kmutex_t etfs_lock;
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);

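/* Translate an etfs registration type to the corresponding vnode type. */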
static enum vtype
ettype_to_vtype(enum rump_etfs_type et)
{
	enum vtype vt;

	switch (et) {
	case RUMP_ETFS_REG:
		vt = VREG;
		break;
	case RUMP_ETFS_BLK:
		vt = VBLK;
		break;
	case RUMP_ETFS_CHR:
		vt = VCHR;
		break;
	default:
		panic("invalid et type: %d", et);
	}

	return vt;
}

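/*
 * Look up a registered etfs key.  Returns true and sets *rnp if found.
 * The caller must hold etfs_lock.
 */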
static bool
etfs_find(const char *key, struct rumpfs_node **rnp)
{
	struct etfs *et;
	size_t keylen = strlen(key);
	bool rv = false;

	KASSERT(mutex_owned(&etfs_lock));

	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(key, et->et_key) == 0) {
			*rnp = et->et_rn;
			rv = true;
			break;
		}
	}

	return rv;
}

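/*
 * Register a key => host file mapping: validate the requested window
 * against the host file, register block/character backends with rumpblk,
 * create the backing rumpfs node and link it onto etfs_list unless the
 * key is already registered.
 */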
static int
doregister(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{
	struct etfs *et;
	struct rumpfs_node *rn_dummy;
	uint64_t fsize;
	dev_t rdev = NODEV;
	devminor_t dmin;
	int hft, error;

	if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
		return error;

	/* check that we give sensible arguments */
	if (begin > fsize)
		return EINVAL;
	if (size == RUMP_ETFS_SIZE_ENDOFF)
		size = fsize - begin;
	if (begin + size > fsize)
		return EINVAL;

	if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
		error = rumpblk_register(hostpath, &dmin, begin, size);
		if (error != 0) {
			return error;
		}
		rdev = makedev(RUMPBLK, dmin);
	}

	et = kmem_alloc(sizeof(*et), KM_SLEEP);
	strcpy(et->et_key, key);
	et->et_keylen = strlen(et->et_key);
	et->et_rn = makeprivate(ettype_to_vtype(ftype), rdev, size, hostpath);

	mutex_enter(&etfs_lock);
	if (etfs_find(key, &rn_dummy)) {
		mutex_exit(&etfs_lock);
		kmem_free(et, sizeof(*et));
		/* XXX: rumpblk_deregister(hostpath); */
		return EEXIST;
	}
	LIST_INSERT_HEAD(&etfs_list, et, et_entries);
	mutex_exit(&etfs_lock);

	return 0;
}

int
rump_etfs_register(const char *key, const char *hostpath,
	enum rump_etfs_type ftype)
{

	return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
}

int
rump_etfs_register_withsize(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{

	/*
	 * Check that we're mapping at block offsets.  I guess this
	 * is not technically necessary except for BLK/CHR backends
	 * (i.e. what getfileinfo() returns, not ftype) and can be
	 * removed later if there are problems.
	 */
	if ((begin & (DEV_BSIZE-1)) != 0)
		return EINVAL;
	if (size != RUMP_ETFS_SIZE_ENDOFF && (size & (DEV_BSIZE-1)) != 0)
		return EINVAL;

	return doregister(key, hostpath, ftype, begin, size);
}

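/* Remove a previously registered etfs key. */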
int
rump_etfs_remove(const char *key)
{
	struct etfs *et;
	size_t keylen = strlen(key);

	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			LIST_REMOVE(et, et_entries);
			break;
		}
	}
	mutex_exit(&etfs_lock);

	if (et == NULL)
		return ENOENT;

	/* entry is already unlinked, so free it outside the lock */
	kmem_free(et, sizeof(*et));
	return 0;
}

/*
 * rumpfs
 */

static struct mount rump_mnt;
static unsigned int lastino = 1;	/* atomic_inc_uint_nv() wants unsigned */
static kmutex_t reclock;

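/*
 * Allocate a rumpfs node and prefill its vattr.  VDIR nodes get an empty
 * directory list; VREG nodes remember the host path and start with both
 * host file descriptors closed.
 */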
static struct rumpfs_node *
makeprivate(enum vtype vt, dev_t rdev, off_t size, const char *hostpath)
{
	struct rumpfs_node *rn;
	struct vattr *va;
	struct timespec ts;

	rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);

	switch (vt) {
	case VDIR:
		LIST_INIT(&rn->rn_dir);
		break;
	case VREG:
		rn->rn_readfd = -1;
		rn->rn_writefd = -1;
		rn->rn_hostpath = malloc(strlen(hostpath)+1, M_TEMP, M_WAITOK);
		strcpy(rn->rn_hostpath, hostpath);
		break;
	default:
		break;
	}

	nanotime(&ts);

	va = &rn->rn_va;
	va->va_type = vt;
	va->va_mode = 0755;
	if (vt == VDIR)
		va->va_nlink = 2;
	else
		va->va_nlink = 1;
	va->va_uid = 0;
	va->va_gid = 0;
	va->va_fsid =
	va->va_fileid = atomic_inc_uint_nv(&lastino);
	va->va_size = size;
	va->va_blocksize = 512;
	va->va_atime = ts;
	va->va_mtime = ts;
	va->va_ctime = ts;
	va->va_birthtime = ts;
	va->va_gen = 0;
	va->va_flags = 0;
	va->va_rdev = rdev;
	va->va_bytes = 512;
	va->va_filerev = 0;
	va->va_vaflags = 0;

	return rn;
}

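/*
 * Wrap a rumpfs node in a fresh, exclusively locked vnode.  Block and
 * character nodes get the spec op vector, everything else the regular
 * rumpfs ops.  The caller must hold reclock.
 */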
static int
makevnode(struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	KASSERT(mutex_owned(&reclock));

	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}
	if (vpops != rump_specop_p && va->va_type != VDIR
	    && !(va->va_type == VREG && rn->rn_hostpath != NULL))
		return EOPNOTSUPP;

	rv = getnewvnode(VT_RUMP, &rump_mnt, vpops, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	rn->rn_vp = vp;
	*vpp = vp;

	return 0;
}

/*
 * Simple lookup for faking lookup of device entry for rump file systems
 * and for locating/creating directories.  Yes, this will panic if you
 * call it with the wrong arguments.
 *
 * uhm, this is twisted.  C F C C, hope of C C F C looming
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *vp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	int rv;

	/* we handle only some "non-special" cases */
	if (!(((cnp->cn_flags & ISLASTCN) == 0)
	    || (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)))
		return EOPNOTSUPP;
	if (!((cnp->cn_flags & ISDOTDOT) == 0))
		return EOPNOTSUPP;
	if (!(cnp->cn_namelen != 0 && cnp->cn_pnbuf[0] != '.'))
		return EOPNOTSUPP;

	/* check if we are returning a faked block device */
	if (dvp == rootvnode && cnp->cn_nameiop == LOOKUP) {
		mutex_enter(&etfs_lock);
		if (etfs_find(cnp->cn_pnbuf, &rn)) {
			mutex_exit(&etfs_lock);
			cnp->cn_consume = strlen(cnp->cn_nameptr
			    + cnp->cn_namelen);
			cnp->cn_flags &= ~REQUIREDIR;
			goto getvnode;
		}
		mutex_exit(&etfs_lock);
	}

	if (!rd) {
		LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
			if (strncmp(rd->rd_name, cnp->cn_nameptr,
			    cnp->cn_namelen) == 0)
				break;
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		cnp->cn_flags |= SAVENAME;
		return EJUSTRETURN;
	}
	rn = rd->rd_node;
	rd = NULL;

 getvnode:
	KASSERT(rn);
	mutex_enter(&reclock);
	if ((vp = rn->rn_vp)) {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&reclock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto getvnode;
		*vpp = vp;
	} else {
		rv = makevnode(rn, vpp);
		rn->rn_vp = *vpp;
		mutex_exit(&reclock);
		if (rv)
			return rv;
	}

	return 0;
}

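/* Attributes are kept in the node; just copy them out. */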
static int
rump_vop_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct rumpfs_node *rn = ap->a_vp->v_data;

	memcpy(ap->a_vap, &rn->rn_va, sizeof(struct vattr));
	return 0;
}

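/*
 * Create a directory: allocate a new VDIR node, attach a vnode to it
 * and enter it into the parent's directory list.
 */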
static int
rump_vop_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rdent;
	int rv = 0;

	rn = makeprivate(VDIR, NODEV, DEV_BSIZE, NULL);
	mutex_enter(&reclock);
	rv = makevnode(rn, vpp);
	mutex_exit(&reclock);
	if (rv)
		goto out;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = (*vpp)->v_data;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);

	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);

 out:
	vput(dvp);
	return rv;
}

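/*
 * Create a device node: like mkdir, but the new node takes the type and
 * rdev requested by the caller.
 */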
static int
rump_vop_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rdent;
	int rv;

	rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE, NULL);
	mutex_enter(&reclock);
	rv = makevnode(rn, vpp);
	mutex_exit(&reclock);
	if (rv)
		goto out;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = (*vpp)->v_data;
	rdent->rd_node->rn_va.va_rdev = va->va_rdev;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);

	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);

 out:
	vput(dvp);
	return rv;
}

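/*
 * Opening a VREG node opens the backing host file and caches the host
 * file descriptor for subsequent reads or writes.  Other node types
 * need no work here.
 */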
static int
rump_vop_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int mode = ap->a_mode;
	int error = EINVAL;

	if (vp->v_type != VREG)
		return 0;

	if (mode & FREAD) {
		if (rn->rn_readfd != -1)
			return 0;
		rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
		    O_RDONLY, &error);
	} else if (mode & FWRITE) {
		if (rn->rn_writefd != -1)
			return 0;
		rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
		    O_WRONLY, &error);
	}

	return error;
}

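/*
 * Read from the host file through a bounce buffer: read into kernel
 * memory with rumpuser_read(), then uiomove() the result to the caller.
 */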
static int
rump_vop_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	uint8_t *buf;
	size_t bufsize;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	if (rumpuser_read(rn->rn_readfd, buf, bufsize, &error) == -1)
		goto out;
	error = uiomove(buf, bufsize, uio);

 out:
	kmem_free(buf, bufsize);
	return error;
}

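/*
 * Write to the host file: uiomove() the caller's data into a bounce
 * buffer and hand it to rumpuser_write().
 */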
static int
rump_vop_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	uint8_t *buf;
	size_t bufsize;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;
	KASSERT(uio->uio_resid == 0);
	rumpuser_write(rn->rn_writefd, buf, bufsize, &error);

 out:
	kmem_free(buf, bufsize);
	return error;
}

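/* Generic "always succeeds" stub, used for access and fsync. */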
static int
rump_vop_success(void *v)
{

	return 0;
}

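/*
 * On inactivation, close any cached host file descriptors so the
 * host-side resources are released.
 */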
static int
rump_vop_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int error;

	if (vp->v_type == VREG) {
		if (rn->rn_readfd != -1) {
			rumpuser_close(rn->rn_readfd, &error);
			rn->rn_readfd = -1;
		}
		if (rn->rn_writefd != -1) {
			rumpuser_close(rn->rn_writefd, &error);
			rn->rn_writefd = -1;
		}
	}

	VOP_UNLOCK(vp, 0);
	return 0;
}

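/* Detach the vnode from its rumpfs node; the node itself stays around. */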
static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	vp->v_data = NULL;

	return 0;
}

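/*
 * Catch-all for spec vnodes: a few ops (access, getattr, lock, unlock)
 * are handled by rumpfs itself, everything else is passed to specfs.
 */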
static int
rump_vop_spec(void *v)
{
	struct vop_generic_args *ap = v;
	int (**opvec)(void *);

	switch (ap->a_desc->vdesc_offset) {
	case VOP_ACCESS_DESCOFFSET:
	case VOP_GETATTR_DESCOFFSET:
	case VOP_LOCK_DESCOFFSET:
	case VOP_UNLOCK_DESCOFFSET:
		opvec = rump_vnodeop_p;
		break;
	default:
		opvec = spec_vnodeop_p;
		break;
	}

	return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
}

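/*
 * Bootstrap rumpfs: initialize locks and the fake root mount, install
 * the vnode op vectors and create the root directory vnode.
 */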
void
rumpfs_init(void)
{
	struct rumpfs_node *rn;
	int rv;

	CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);

	mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);

	/* XXX: init properly instead of this crap */
	rump_mnt.mnt_refcnt = 1;
	rump_mnt.mnt_flag = MNT_ROOTFS;
	rw_init(&rump_mnt.mnt_unmounting);
	TAILQ_INIT(&rump_mnt.mnt_vnodelist);

	vfs_opv_init(rump_opv_descs);
	rn = makeprivate(VDIR, NODEV, DEV_BSIZE, NULL);
	mutex_enter(&reclock);
	rv = makevnode(rn, &rootvnode);
	mutex_exit(&reclock);
	if (rv)
		panic("could not create root vnode: %d", rv);
	rootvnode->v_vflag |= VV_ROOT;
	VOP_UNLOCK(rootvnode, 0);
}