/*	$NetBSD: rumpfs.c,v 1.28 2009/10/14 17:29:20 pooka Exp $	*/
2
3 /*
4 * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by Google Summer of Code.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.28 2009/10/14 17:29:20 pooka Exp $");
32
33 #include <sys/param.h>
34 #include <sys/atomic.h>
35 #include <sys/filedesc.h>
36 #include <sys/errno.h>
37 #include <sys/fcntl.h>
38 #include <sys/kauth.h>
39 #include <sys/malloc.h>
40 #include <sys/mount.h>
41 #include <sys/namei.h>
42 #include <sys/lock.h>
43 #include <sys/lockf.h>
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 #include <sys/syscallargs.h>
47 #include <sys/vnode.h>
48
49 #include <miscfs/fifofs/fifo.h>
50 #include <miscfs/specfs/specdev.h>
51 #include <miscfs/genfs/genfs.h>
52
53 #include <rump/rumpuser.h>
54
55 #include "rump_private.h"
56 #include "rump_vfs_private.h"
57
/*
 * Forward declarations for the locally implemented vnode operations.
 * rumpfs implements only the minimal subset needed by rump consumers;
 * everything else defaults to vn_default_error via the op tables below.
 */
static int rump_vop_lookup(void *);
static int rump_vop_getattr(void *);
static int rump_vop_mkdir(void *);
static int rump_vop_mknod(void *);
static int rump_vop_create(void *);
static int rump_vop_inactive(void *);
static int rump_vop_reclaim(void *);
static int rump_vop_success(void *);
static int rump_vop_spec(void *);
static int rump_vop_read(void *);
static int rump_vop_write(void *);
static int rump_vop_open(void *);
70
/*
 * Stub fifo op vector: rumpfs has no fifo support, so every fifo
 * operation resolves to vn_default_error.  The vector must still
 * exist because specfs references fifo_vnodeop_p.
 */
int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };
78
/*
 * Main rumpfs vnode op vector.  Operations not listed here fall back
 * to vn_default_error.  fsync and access unconditionally succeed;
 * locking is handled by the genfs standard routines.
 */
int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_access_desc, rump_vop_success },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_putpages_desc, genfs_null_putpages },
	{ &vop_fsync_desc, rump_vop_success },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };
101
/*
 * Op vector for device (VBLK/VCHR) nodes.  All operations route
 * through rump_vop_spec, which dispatches either locally or to the
 * real specfs vectors depending on the operation.
 */
int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };
109
/* All op vectors initialized by vfs_opv_init() in rumpfs_init(). */
const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};
115
/* Directory entry: maps a name to a node inside a VDIR rumpfs_node. */
struct rumpfs_dent {
	char *rd_name;			/* NUL-terminated entry name */
	struct rumpfs_node *rd_node;	/* node this entry refers to */

	LIST_ENTRY(rumpfs_dent) rd_entries; /* linkage in parent's rn_dir */
};
122
/*
 * In-memory inode.  The rn_u union is discriminated by rn_va.va_type:
 * "reg" is used for VREG nodes backed by a host file (etfs), "dir"
 * for VDIR nodes.  rn_vp is protected by reclock.
 */
struct rumpfs_node {
	struct vattr rn_va;	/* attributes; va_type selects union arm */
	struct vnode *rn_vp;	/* associated vnode, NULL if not cycled in */

	union {
		struct {
			char *hostpath;	/* VREG: backing file on the host */
			int readfd;	/* host fd for reads, -1 if closed */
			int writefd;	/* host fd for writes, -1 if closed */
			uint64_t offset; /* base offset into host file */
		} reg;
		LIST_HEAD(, rumpfs_dent) dir; /* VDIR: children */
	} rn_u;
};
#define rn_hostpath rn_u.reg.hostpath
#define rn_readfd rn_u.reg.readfd
#define rn_writefd rn_u.reg.writefd
#define rn_offset rn_u.reg.offset
#define rn_dir rn_u.dir
142
static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];	/* lookup key (pathname) */
	size_t et_keylen;		/* cached strlen(et_key) */

	LIST_ENTRY(etfs) et_entries;	/* linkage on etfs_list */

	struct rumpfs_node *et_rn;	/* backing node; outlives removal */
};
static kmutex_t etfs_lock;		/* protects etfs_list */
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);
163
164 static enum vtype
165 ettype_to_vtype(enum rump_etfs_type et)
166 {
167 enum vtype vt;
168
169 switch (et) {
170 case RUMP_ETFS_REG:
171 vt = VREG;
172 break;
173 case RUMP_ETFS_BLK:
174 vt = VBLK;
175 break;
176 case RUMP_ETFS_CHR:
177 vt = VCHR;
178 break;
179 default:
180 panic("invalid et type: %d", et);
181 }
182
183 return vt;
184 }
185
186 static bool
187 etfs_find(const char *key, struct rumpfs_node **rnp)
188 {
189 struct etfs *et;
190 size_t keylen = strlen(key);
191 bool rv = false;
192
193 KASSERT(mutex_owned(&etfs_lock));
194
195 LIST_FOREACH(et, &etfs_list, et_entries) {
196 if (keylen == et->et_keylen && strcmp(key, et->et_key) == 0) {
197 *rnp = et->et_rn;
198 rv = true;
199 break;
200 }
201 }
202
203 return rv;
204 }
205
206 static int
207 doregister(const char *key, const char *hostpath,
208 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
209 {
210 struct etfs *et;
211 struct rumpfs_node *rn_dummy, *rn;
212 uint64_t fsize;
213 dev_t rdev = NODEV;
214 devminor_t dmin;
215 int hft, error;
216
217 if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
218 return error;
219
220 /* check that we give sensible arguments */
221 if (begin > fsize)
222 return EINVAL;
223 if (size == RUMP_ETFS_SIZE_ENDOFF)
224 size = fsize - begin;
225 if (begin + size > fsize)
226 return EINVAL;
227
228 if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
229 error = rumpblk_register(hostpath, &dmin, begin, size);
230 if (error != 0) {
231 return error;
232 }
233 rdev = makedev(RUMPBLK, dmin);
234 }
235
236 et = kmem_alloc(sizeof(*et), KM_SLEEP);
237 strcpy(et->et_key, key);
238 et->et_keylen = strlen(et->et_key);
239 et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), rdev, size);
240 if (ftype == RUMP_ETFS_REG) {
241 size_t len = strlen(hostpath)+1;
242
243 rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
244 memcpy(rn->rn_hostpath, hostpath, len);
245 rn->rn_offset = begin;
246 }
247
248 mutex_enter(&etfs_lock);
249 if (etfs_find(key, &rn_dummy)) {
250 mutex_exit(&etfs_lock);
251 kmem_free(et, sizeof(*et));
252 /* XXX: rumpblk_deregister(hostpath); */
253 return EEXIST;
254 }
255 LIST_INSERT_HEAD(&etfs_list, et, et_entries);
256 mutex_exit(&etfs_lock);
257
258 return 0;
259 }
260
261 int
262 rumppriv_etfs_register(const char *key, const char *hostpath,
263 enum rump_etfs_type ftype)
264 {
265
266 return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
267 }
268
269 int
270 rumppriv_etfs_register_withsize(const char *key, const char *hostpath,
271 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
272 {
273
274 /*
275 * Check that we're mapping at block offsets. I guess this
276 * is not technically necessary except for BLK/CHR backends
277 * (i.e. what getfileinfo() returns, not ftype) and can be
278 * removed later if there are problems.
279 */
280 if ((begin & (DEV_BSIZE-1)) != 0)
281 return EINVAL;
282 if (size != RUMP_ETFS_SIZE_ENDOFF && (size & (DEV_BSIZE-1)) != 0)
283 return EINVAL;
284
285 return doregister(key, hostpath, ftype, begin, size);
286 }
287
288 int
289 rumppriv_etfs_remove(const char *key)
290 {
291 struct etfs *et;
292 size_t keylen = strlen(key);
293
294 mutex_enter(&etfs_lock);
295 LIST_FOREACH(et, &etfs_list, et_entries) {
296 if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
297 LIST_REMOVE(et, et_entries);
298 kmem_free(et, sizeof(*et));
299 break;
300 }
301 }
302 mutex_exit(&etfs_lock);
303
304 if (!et)
305 return ENOENT;
306 return 0;
307 }
308
309 /*
310 * rumpfs
311 */
312
313 static struct mount rump_mnt;
314 static int lastino = 1;
315 static kmutex_t reclock;
316
317 static struct rumpfs_node *
318 makeprivate(enum vtype vt, dev_t rdev, off_t size)
319 {
320 struct rumpfs_node *rn;
321 struct vattr *va;
322 struct timespec ts;
323
324 rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);
325
326 switch (vt) {
327 case VDIR:
328 LIST_INIT(&rn->rn_dir);
329 break;
330 case VREG:
331 rn->rn_readfd = -1;
332 rn->rn_writefd = -1;
333 break;
334 default:
335 break;
336 }
337
338 nanotime(&ts);
339
340 va = &rn->rn_va;
341 va->va_type = vt;
342 va->va_mode = 0755;
343 if (vt == VDIR)
344 va->va_nlink = 2;
345 else
346 va->va_nlink = 1;
347 va->va_uid = 0;
348 va->va_gid = 0;
349 va->va_fsid =
350 va->va_fileid = atomic_inc_uint_nv(&lastino);
351 va->va_size = size;
352 va->va_blocksize = 512;
353 va->va_atime = ts;
354 va->va_mtime = ts;
355 va->va_ctime = ts;
356 va->va_birthtime = ts;
357 va->va_gen = 0;
358 va->va_flags = 0;
359 va->va_rdev = rdev;
360 va->va_bytes = 512;
361 va->va_filerev = 0;
362 va->va_vaflags = 0;
363
364 return rn;
365 }
366
/*
 * Create and initialize a vnode for the given rumpfs_node, returning
 * it locked in *vpp.  Caller must hold reclock; the rn_vp link is
 * established under it.  Only VDIR, host-backed VREG, VSOCK and
 * device (spec) nodes are supported; anything else gets EOPNOTSUPP.
 */
static int
makevnode(struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	KASSERT(mutex_owned(&reclock));

	/* devices use the spec op vector, everything else the rump one */
	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}
	/* reject node types we cannot actually service */
	if (vpops != rump_specop_p && va->va_type != VDIR
	    && !(va->va_type == VREG && rn->rn_hostpath != NULL)
	    && va->va_type != VSOCK)
		return EOPNOTSUPP;

	rv = getnewvnode(VT_RUMP, &rump_mnt, vpops, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		/* attach specfs per-device state */
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	rn->rn_vp = vp;
	*vpp = vp;

	return 0;
}
405
406 /*
407 * Simple lookup for faking lookup of device entry for rump file systems
408 * and for locating/creating directories. Yes, this will panic if you
409 * call it with the wrong arguments.
410 *
411 * uhm, this is twisted. C F C C, hope of C C F C looming
412 */
413 static int
414 rump_vop_lookup(void *v)
415 {
416 struct vop_lookup_args /* {
417 struct vnode *a_dvp;
418 struct vnode **a_vpp;
419 struct componentname *a_cnp;
420 }; */ *ap = v;
421 struct componentname *cnp = ap->a_cnp;
422 struct vnode *dvp = ap->a_dvp;
423 struct vnode **vpp = ap->a_vpp;
424 struct vnode *vp;
425 struct rumpfs_node *rnd = dvp->v_data, *rn;
426 struct rumpfs_dent *rd = NULL;
427 int rv;
428
429 /* we handle only some "non-special" cases */
430 if (!(((cnp->cn_flags & ISLASTCN) == 0)
431 || (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)))
432 return EOPNOTSUPP;
433 if (!((cnp->cn_flags & ISDOTDOT) == 0))
434 return EOPNOTSUPP;
435 if (!(cnp->cn_namelen != 0 && cnp->cn_pnbuf[0] != '.'))
436 return EOPNOTSUPP;
437
438 /* check if we are returning a faked block device */
439 if (dvp == rootvnode && cnp->cn_nameiop == LOOKUP) {
440 mutex_enter(&etfs_lock);
441 if (etfs_find(cnp->cn_pnbuf, &rn)) {
442 mutex_exit(&etfs_lock);
443 cnp->cn_consume = strlen(cnp->cn_nameptr
444 + cnp->cn_namelen);
445 cnp->cn_flags &= ~REQUIREDIR;
446 goto getvnode;
447 }
448 mutex_exit(&etfs_lock);
449 }
450
451 if (!rd) {
452 LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
453 if (strncmp(rd->rd_name, cnp->cn_nameptr,
454 cnp->cn_namelen) == 0)
455 break;
456 }
457 }
458
459 if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
460 return ENOENT;
461
462 if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
463 cnp->cn_flags |= SAVENAME;
464 return EJUSTRETURN;
465 }
466 rn = rd->rd_node;
467 rd = NULL;
468
469 getvnode:
470 KASSERT(rn);
471 mutex_enter(&reclock);
472 if ((vp = rn->rn_vp)) {
473 mutex_enter(&vp->v_interlock);
474 mutex_exit(&reclock);
475 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
476 goto getvnode;
477 *vpp = vp;
478 } else {
479 rv = makevnode(rn, vpp);
480 rn->rn_vp = *vpp;
481 mutex_exit(&reclock);
482 if (rv)
483 return rv;
484 }
485
486 return 0;
487 }
488
489 static int
490 rump_vop_getattr(void *v)
491 {
492 struct vop_getattr_args /* {
493 struct vnode *a_vp;
494 struct vattr *a_vap;
495 kauth_cred_t a_cred;
496 } */ *ap = v;
497 struct rumpfs_node *rn = ap->a_vp->v_data;
498
499 memcpy(ap->a_vap, &rn->rn_va, sizeof(struct vattr));
500 return 0;
501 }
502
503 static int
504 rump_vop_mkdir(void *v)
505 {
506 struct vop_mkdir_args /* {
507 struct vnode *a_dvp;
508 struct vnode **a_vpp;
509 struct componentname *a_cnp;
510 struct vattr *a_vap;
511 }; */ *ap = v;
512 struct vnode *dvp = ap->a_dvp;
513 struct vnode **vpp = ap->a_vpp;
514 struct componentname *cnp = ap->a_cnp;
515 struct rumpfs_node *rnd = dvp->v_data, *rn;
516 struct rumpfs_dent *rdent;
517 int rv = 0;
518
519 rn = makeprivate(VDIR, NODEV, DEV_BSIZE);
520 mutex_enter(&reclock);
521 rv = makevnode(rn, vpp);
522 mutex_exit(&reclock);
523 if (rv)
524 goto out;
525
526 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
527 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
528 rdent->rd_node = (*vpp)->v_data;
529 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
530
531 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
532
533 out:
534 vput(dvp);
535 return rv;
536 }
537
538 static int
539 rump_vop_mknod(void *v)
540 {
541 struct vop_mknod_args /* {
542 struct vnode *a_dvp;
543 struct vnode **a_vpp;
544 struct componentname *a_cnp;
545 struct vattr *a_vap;
546 }; */ *ap = v;
547 struct vnode *dvp = ap->a_dvp;
548 struct vnode **vpp = ap->a_vpp;
549 struct componentname *cnp = ap->a_cnp;
550 struct vattr *va = ap->a_vap;
551 struct rumpfs_node *rnd = dvp->v_data, *rn;
552 struct rumpfs_dent *rdent;
553 int rv;
554
555 rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE);
556 mutex_enter(&reclock);
557 rv = makevnode(rn, vpp);
558 mutex_exit(&reclock);
559 if (rv)
560 goto out;
561
562 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
563 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
564 rdent->rd_node = (*vpp)->v_data;
565 rdent->rd_node->rn_va.va_rdev = va->va_rdev;
566 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
567
568 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
569
570 out:
571 vput(dvp);
572 return rv;
573 }
574
575 static int
576 rump_vop_create(void *v)
577 {
578 struct vop_create_args /* {
579 struct vnode *a_dvp;
580 struct vnode **a_vpp;
581 struct componentname *a_cnp;
582 struct vattr *a_vap;
583 }; */ *ap = v;
584 struct vnode *dvp = ap->a_dvp;
585 struct vnode **vpp = ap->a_vpp;
586 struct componentname *cnp = ap->a_cnp;
587 struct vattr *va = ap->a_vap;
588 struct rumpfs_node *rnd = dvp->v_data, *rn;
589 struct rumpfs_dent *rdent;
590 int rv;
591
592 if (va->va_type != VSOCK) {
593 rv = EOPNOTSUPP;
594 goto out;
595 }
596 rn = makeprivate(VSOCK, NODEV, DEV_BSIZE);
597 mutex_enter(&reclock);
598 rv = makevnode(rn, vpp);
599 mutex_exit(&reclock);
600 if (rv)
601 goto out;
602
603 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
604 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
605 rdent->rd_node = (*vpp)->v_data;
606 rdent->rd_node->rn_va.va_rdev = NODEV;
607 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
608
609 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
610
611 out:
612 vput(dvp);
613 return rv;
614 }
615
616 static int
617 rump_vop_open(void *v)
618 {
619 struct vop_open_args /* {
620 struct vnode *a_vp;
621 int a_mode;
622 kauth_cred_t a_cred;
623 } */ *ap = v;
624 struct vnode *vp = ap->a_vp;
625 struct rumpfs_node *rn = vp->v_data;
626 int mode = ap->a_mode;
627 int error = EINVAL;
628
629 if (vp->v_type != VREG)
630 return 0;
631
632 if (mode & FREAD) {
633 if (rn->rn_readfd != -1)
634 return 0;
635 rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
636 O_RDONLY, &error);
637 } else if (mode & FWRITE) {
638 if (rn->rn_writefd != -1)
639 return 0;
640 rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
641 O_WRONLY, &error);
642 }
643
644 return error;
645 }
646
647 static int
648 rump_vop_read(void *v)
649 {
650 struct vop_read_args /* {
651 struct vnode *a_vp;
652 struct uio *a_uio;
653 int ioflags a_ioflag;
654 kauth_cred_t a_cred;
655 }; */ *ap = v;
656 struct vnode *vp = ap->a_vp;
657 struct rumpfs_node *rn = vp->v_data;
658 struct uio *uio = ap->a_uio;
659 uint8_t *buf;
660 size_t bufsize;
661 int error = 0;
662
663 bufsize = uio->uio_resid;
664 buf = kmem_alloc(bufsize, KM_SLEEP);
665 if (rumpuser_pread(rn->rn_readfd, buf, bufsize,
666 uio->uio_offset + rn->rn_offset, &error) == -1)
667 goto out;
668 error = uiomove(buf, bufsize, uio);
669
670 out:
671 kmem_free(buf, bufsize);
672 return error;
673 }
674
675 static int
676 rump_vop_write(void *v)
677 {
678 struct vop_read_args /* {
679 struct vnode *a_vp;
680 struct uio *a_uio;
681 int ioflags a_ioflag;
682 kauth_cred_t a_cred;
683 }; */ *ap = v;
684 struct vnode *vp = ap->a_vp;
685 struct rumpfs_node *rn = vp->v_data;
686 struct uio *uio = ap->a_uio;
687 uint8_t *buf;
688 size_t bufsize;
689 int error = 0;
690
691 bufsize = uio->uio_resid;
692 buf = kmem_alloc(bufsize, KM_SLEEP);
693 error = uiomove(buf, bufsize, uio);
694 if (error)
695 goto out;
696 KASSERT(uio->uio_resid == 0);
697 rumpuser_pwrite(rn->rn_writefd, buf, bufsize,
698 uio->uio_offset + rn->rn_offset, &error);
699
700 out:
701 kmem_free(buf, bufsize);
702 return error;
703 }
704
/* Unconditional success; used for access and fsync. */
static int
rump_vop_success(void *v)
{

	return 0;
}
711
712 static int
713 rump_vop_inactive(void *v)
714 {
715 struct vop_inactive_args *ap = v;
716 struct vnode *vp = ap->a_vp;
717 struct rumpfs_node *rn = vp->v_data;
718 int error;
719
720 if (vp->v_type == VREG) {
721 if (rn->rn_readfd != -1) {
722 rumpuser_close(rn->rn_readfd, &error);
723 rn->rn_readfd = -1;
724 }
725 if (rn->rn_writefd != -1) {
726 rumpuser_close(rn->rn_writefd, &error);
727 rn->rn_writefd = -1;
728 }
729 }
730
731 VOP_UNLOCK(vp, 0);
732 return 0;
733 }
734
/*
 * Detach the vnode from its rumpfs_node under reclock.  The node
 * itself is intentionally not freed: etfs nodes are kept so a later
 * lookup can attach a fresh vnode (see the etfs comment above).
 * NOTE(review): non-etfs nodes (mkdir/mknod/create products) are
 * never freed either — looks like a deliberate simplification here,
 * confirm before "fixing".
 */
static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	vp->v_data = NULL;

	return 0;
}
751
752 static int
753 rump_vop_spec(void *v)
754 {
755 struct vop_generic_args *ap = v;
756 int (**opvec)(void *);
757
758 switch (ap->a_desc->vdesc_offset) {
759 case VOP_ACCESS_DESCOFFSET:
760 case VOP_GETATTR_DESCOFFSET:
761 case VOP_LOCK_DESCOFFSET:
762 case VOP_UNLOCK_DESCOFFSET:
763 opvec = rump_vnodeop_p;
764 break;
765 default:
766 opvec = spec_vnodeop_p;
767 break;
768 }
769
770 return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
771 }
772
/*
 * One-time rumpfs initialization: set up locks, fake up a minimal
 * root mount, initialize the op vectors and create the root vnode.
 * Panics if the root vnode cannot be created, since nothing works
 * without it.
 */
void
rumpfs_init(void)
{
	struct rumpfs_node *rn;
	int rv;

	/* doregister() relies on these sentinels being interchangeable */
	CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);

	mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);

	/* XXX: init properly instead of this crap */
	rump_mnt.mnt_refcnt = 1;
	rump_mnt.mnt_flag = MNT_ROOTFS;
	rw_init(&rump_mnt.mnt_unmounting);
	TAILQ_INIT(&rump_mnt.mnt_vnodelist);

	vfs_opv_init(rump_opv_descs);
	rn = makeprivate(VDIR, NODEV, DEV_BSIZE);
	mutex_enter(&reclock);
	rv = makevnode(rn, &rootvnode);
	mutex_exit(&reclock);
	if (rv)
		panic("could not create root vnode: %d", rv);
	rootvnode->v_vflag |= VV_ROOT;
	/* makevnode() returns the vnode locked; drop the lock */
	VOP_UNLOCK(rootvnode, 0);
}
800