/*	$NetBSD: rumpfs.c,v 1.34 2009/11/30 12:32:13 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.34 2009/11/30 12:32:13 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/syscallargs.h>
#include <sys/vnode.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
#include "rump_vfs_private.h"

static int rump_vop_lookup(void *);
static int rump_vop_getattr(void *);
static int rump_vop_mkdir(void *);
static int rump_vop_mknod(void *);
static int rump_vop_create(void *);
static int rump_vop_inactive(void *);
static int rump_vop_reclaim(void *);
static int rump_vop_success(void *);
static int rump_vop_spec(void *);
static int rump_vop_read(void *);
static int rump_vop_write(void *);
static int rump_vop_open(void *);

int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };

int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_access_desc, rump_vop_success },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_putpages_desc, genfs_null_putpages },
	{ &vop_fsync_desc, rump_vop_success },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };

int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };

const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};

struct rumpfs_dent {
	char *rd_name;
	struct rumpfs_node *rd_node;

	LIST_ENTRY(rumpfs_dent) rd_entries;
};

struct rumpfs_node {
	struct vattr rn_va;
	struct vnode *rn_vp;

	union {
		struct {
			char *hostpath;		/* VREG */
			int readfd;
			int writefd;
			uint64_t offset;
		} reg;
		LIST_HEAD(, rumpfs_dent) dir;	/* VDIR */
	} rn_u;
};
#define rn_hostpath	rn_u.reg.hostpath
#define rn_readfd	rn_u.reg.readfd
#define rn_writefd	rn_u.reg.writefd
#define rn_offset	rn_u.reg.offset
#define rn_dir		rn_u.dir

struct rumpfs_mount {
	struct vnode *rfsmp_rvp;
};

static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];
	size_t et_keylen;

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;
};
static kmutex_t etfs_lock;
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);

static enum vtype
ettype_to_vtype(enum rump_etfs_type et)
{
	enum vtype vt;

	switch (et) {
	case RUMP_ETFS_REG:
		vt = VREG;
		break;
	case RUMP_ETFS_BLK:
		vt = VBLK;
		break;
	case RUMP_ETFS_CHR:
		vt = VCHR;
		break;
	default:
		panic("invalid et type: %d", et);
	}

	return vt;
}

static bool
etfs_find(const char *key, struct rumpfs_node **rnp)
{
	struct etfs *et;
	size_t keylen = strlen(key);
	bool rv = false;

	KASSERT(mutex_owned(&etfs_lock));

	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(key, et->et_key) == 0) {
			*rnp = et->et_rn;
			rv = true;
			break;
		}
	}

	return rv;
}

static int
doregister(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{
	struct etfs *et;
	struct rumpfs_node *rn_dummy, *rn;
	uint64_t fsize;
	dev_t rdev = NODEV;
	devminor_t dmin;
	int hft, error;

	if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
		return error;

	/* check that we give sensible arguments */
	if (begin > fsize)
		return EINVAL;
	if (size == RUMP_ETFS_SIZE_ENDOFF)
		size = fsize - begin;
	if (begin + size > fsize)
		return EINVAL;

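	/*
	 * Block and character nodes are backed by the rumpblk driver:
	 * register the host path with it and fabricate a device number
	 * for the new node.
	 */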
	if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
		error = rumpblk_register(hostpath, &dmin, begin, size);
		if (error != 0) {
			return error;
		}
		rdev = makedev(RUMPBLK, dmin);
	}

	et = kmem_alloc(sizeof(*et), KM_SLEEP);
	strcpy(et->et_key, key);
	et->et_keylen = strlen(et->et_key);
	et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), rdev, size);
	if (ftype == RUMP_ETFS_REG) {
		size_t len = strlen(hostpath)+1;

		rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
		memcpy(rn->rn_hostpath, hostpath, len);
		rn->rn_offset = begin;
	}

	mutex_enter(&etfs_lock);
	if (etfs_find(key, &rn_dummy)) {
		mutex_exit(&etfs_lock);
		kmem_free(et, sizeof(*et));
		/* XXX: rumpblk_deregister(hostpath); */
		return EEXIST;
	}
	LIST_INSERT_HEAD(&etfs_list, et, et_entries);
	mutex_exit(&etfs_lock);

	return 0;
}

int
rump_etfs_register(const char *key, const char *hostpath,
	enum rump_etfs_type ftype)
{

	return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
}
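
/*
 * Illustrative usage sketch (not part of this file's interface); the
 * key and host path below are made up:
 *
 *	int error;
 *
 *	error = rump_etfs_register("/dev/harddisk", "./disk.img",
 *	    RUMP_ETFS_BLK);
 *
 * A zero return means a later lookup of /dev/harddisk from the rumpfs
 * root resolves to a block device vnode backed by ./disk.img on the
 * host; otherwise error holds an errno value such as EEXIST.
 */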

int
rump_etfs_register_withsize(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{

	/*
	 * Check that we're mapping at block offsets.  I guess this
	 * is not technically necessary except for BLK/CHR backends
	 * (i.e. what getfileinfo() returns, not ftype) and can be
	 * removed later if there are problems.
	 */
	if ((begin & (DEV_BSIZE-1)) != 0)
		return EINVAL;
	if (size != RUMP_ETFS_SIZE_ENDOFF && (size & (DEV_BSIZE-1)) != 0)
		return EINVAL;

	return doregister(key, hostpath, ftype, begin, size);
}
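
/*
 * A worked example of the alignment rule above (hypothetical values):
 * with DEV_BSIZE being 512, begin == 1024 and size == 4096 are
 * accepted, whereas begin == 1000 fails with EINVAL because it is not
 * a multiple of the block size.
 *
 *	error = rump_etfs_register_withsize("/dev/part", "./disk.img",
 *	    RUMP_ETFS_BLK, 1024, 4096);
 */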

int
rump_etfs_remove(const char *key)
{
	struct etfs *et;
	size_t keylen = strlen(key);

	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			LIST_REMOVE(et, et_entries);
			kmem_free(et, sizeof(*et));
			break;
		}
	}
	mutex_exit(&etfs_lock);

	if (!et)
		return ENOENT;
	return 0;
}

/*
 * rumpfs
 */

static int lastino = 1;
static kmutex_t reclock;

static struct rumpfs_node *
makeprivate(enum vtype vt, dev_t rdev, off_t size)
{
	struct rumpfs_node *rn;
	struct vattr *va;
	struct timespec ts;

	rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);

	switch (vt) {
	case VDIR:
		LIST_INIT(&rn->rn_dir);
		break;
	case VREG:
		rn->rn_readfd = -1;
		rn->rn_writefd = -1;
		break;
	default:
		break;
	}

	nanotime(&ts);

	va = &rn->rn_va;
	va->va_type = vt;
	va->va_mode = 0755;
	if (vt == VDIR)
		va->va_nlink = 2;
	else
		va->va_nlink = 1;
	va->va_uid = 0;
	va->va_gid = 0;
	va->va_fsid =
	va->va_fileid = atomic_inc_uint_nv(&lastino);
	va->va_size = size;
	va->va_blocksize = 512;
	va->va_atime = ts;
	va->va_mtime = ts;
	va->va_ctime = ts;
	va->va_birthtime = ts;
	va->va_gen = 0;
	va->va_flags = 0;
	va->va_rdev = rdev;
	va->va_bytes = 512;
	va->va_filerev = 0;
	va->va_vaflags = 0;

	return rn;
}

static int
makevnode(struct mount *mp, struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	KASSERT(mutex_owned(&reclock));

	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}
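	/*
	 * Only directories, sockets, device special files and
	 * host-backed (etfs) regular files are supported here;
	 * reject everything else.
	 */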
	if (vpops != rump_specop_p && va->va_type != VDIR
	    && !(va->va_type == VREG && rn->rn_hostpath != NULL)
	    && va->va_type != VSOCK)
		return EOPNOTSUPP;

	rv = getnewvnode(VT_RUMP, mp, vpops, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	rn->rn_vp = vp;
	*vpp = vp;

	return 0;
}

/*
 * Simple lookup for faking lookup of device entry for rump file systems
 * and for locating/creating directories.  Yes, this will panic if you
 * call it with the wrong arguments.
 *
 * uhm, this is twisted.  C F C C, hope of C C F C looming
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *vp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	int rv;

	/* we handle only some "non-special" cases */
	if (!(((cnp->cn_flags & ISLASTCN) == 0)
	    || (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)))
		return EOPNOTSUPP;
	if (!((cnp->cn_flags & ISDOTDOT) == 0))
		return EOPNOTSUPP;

	/* check for dot, return directly if the case */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		vref(dvp);
		*vpp = dvp;
		goto out;
	}

	/* check if we are returning a faked block device */
	if (dvp == rootvnode && cnp->cn_nameiop == LOOKUP) {
		mutex_enter(&etfs_lock);
		if (etfs_find(cnp->cn_pnbuf, &rn)) {
			mutex_exit(&etfs_lock);
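			/*
			 * The key matched the remaining path in full:
			 * tell namei() that everything after the current
			 * component was consumed here, so the whole key
			 * maps to this single node and no further
			 * directory traversal is attempted.
			 */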
			cnp->cn_consume = strlen(cnp->cn_nameptr
			    + cnp->cn_namelen);
			cnp->cn_flags &= ~REQUIREDIR;
			goto getvnode;
		}
		mutex_exit(&etfs_lock);
	}

	if (!rd) {
		LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
			if (strncmp(rd->rd_name, cnp->cn_nameptr,
			    cnp->cn_namelen) == 0)
				break;
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		cnp->cn_flags |= SAVENAME;
		return EJUSTRETURN;
	}
	rn = rd->rd_node;
	rd = NULL;

 getvnode:
	KASSERT(rn);
	mutex_enter(&reclock);
	if ((vp = rn->rn_vp)) {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&reclock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto getvnode;
		*vpp = vp;
	} else {
		rv = makevnode(dvp->v_mount, rn, vpp);
		rn->rn_vp = *vpp;
		mutex_exit(&reclock);
		if (rv)
			return rv;
	}

 out:
	return 0;
}

static int
rump_vop_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct rumpfs_node *rn = ap->a_vp->v_data;

	memcpy(ap->a_vap, &rn->rn_va, sizeof(struct vattr));
	return 0;
}

static int
rump_vop_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rdent;
	int rv = 0;

	rn = makeprivate(VDIR, NODEV, DEV_BSIZE);
	mutex_enter(&reclock);
	rv = makevnode(dvp->v_mount, rn, vpp);
	mutex_exit(&reclock);
	if (rv)
		goto out;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = (*vpp)->v_data;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);

	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);

 out:
	vput(dvp);
	return rv;
}

static int
rump_vop_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rdent;
	int rv;

	rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE);
	mutex_enter(&reclock);
	rv = makevnode(dvp->v_mount, rn, vpp);
	mutex_exit(&reclock);
	if (rv)
		goto out;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = (*vpp)->v_data;
	rdent->rd_node->rn_va.va_rdev = va->va_rdev;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);

	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);

 out:
	vput(dvp);
	return rv;
}

static int
rump_vop_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rdent;
	int rv;

	if (va->va_type != VSOCK) {
		rv = EOPNOTSUPP;
		goto out;
	}
	rn = makeprivate(VSOCK, NODEV, DEV_BSIZE);
	mutex_enter(&reclock);
	rv = makevnode(dvp->v_mount, rn, vpp);
	mutex_exit(&reclock);
	if (rv)
		goto out;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = (*vpp)->v_data;
	rdent->rd_node->rn_va.va_rdev = NODEV;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);

	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);

 out:
	vput(dvp);
	return rv;
}

static int
rump_vop_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int mode = ap->a_mode;
	int error = EINVAL;

	if (vp->v_type != VREG)
		return 0;

	if (mode & FREAD) {
		if (rn->rn_readfd != -1)
			return 0;
		rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
		    O_RDONLY, &error);
	} else if (mode & FWRITE) {
		if (rn->rn_writefd != -1)
			return 0;
		rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
		    O_WRONLY, &error);
	}

	return error;
}

static int
rump_vop_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	uint8_t *buf;
	size_t bufsize;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	if (rumpuser_pread(rn->rn_readfd, buf, bufsize,
	    uio->uio_offset + rn->rn_offset, &error) == -1)
		goto out;
	error = uiomove(buf, bufsize, uio);

 out:
	kmem_free(buf, bufsize);
	return error;
}

static int
rump_vop_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	uint8_t *buf;
	off_t oldoff;
	size_t bufsize;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	/* uiomove() advances uio_offset, so remember where the write starts */
	oldoff = uio->uio_offset;
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;
	KASSERT(uio->uio_resid == 0);
	rumpuser_pwrite(rn->rn_writefd, buf, bufsize,
	    oldoff + rn->rn_offset, &error);

 out:
	kmem_free(buf, bufsize);
	return error;
}

static int
rump_vop_success(void *v)
{

	return 0;
}

static int
rump_vop_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int error;

	if (vp->v_type == VREG) {
		if (rn->rn_readfd != -1) {
			rumpuser_close(rn->rn_readfd, &error);
			rn->rn_readfd = -1;
		}
		if (rn->rn_writefd != -1) {
			rumpuser_close(rn->rn_writefd, &error);
			rn->rn_writefd = -1;
		}
	}

	VOP_UNLOCK(vp, 0);
	return 0;
}

static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	vp->v_data = NULL;

	return 0;
}

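/*
 * Catch-all op for special-file vnodes: the few operations that rumpfs
 * wants to handle itself (access, getattr, lock, unlock) are steered to
 * the rumpfs vector, everything else is passed through to the standard
 * specfs operations.
 */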
static int
rump_vop_spec(void *v)
{
	struct vop_generic_args *ap = v;
	int (**opvec)(void *);

	switch (ap->a_desc->vdesc_offset) {
	case VOP_ACCESS_DESCOFFSET:
	case VOP_GETATTR_DESCOFFSET:
	case VOP_LOCK_DESCOFFSET:
	case VOP_UNLOCK_DESCOFFSET:
		opvec = rump_vnodeop_p;
		break;
	default:
		opvec = spec_vnodeop_p;
		break;
	}

	return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
}

/*
 * Begin vfs-level stuff
 */

VFS_PROTOS(rumpfs);
struct vfsops rumpfs_vfsops = {
	.vfs_name = MOUNT_RUMPFS,
	.vfs_min_mount_data = 0,
	.vfs_mount = rumpfs_mount,
	.vfs_start = (void *)nullop,
	.vfs_unmount = rumpfs_unmount,
	.vfs_root = rumpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = genfs_statvfs,
	.vfs_sync = (void *)nullop,
	.vfs_vget = rumpfs_vget,
	.vfs_fhtovp = (void *)eopnotsupp,
	.vfs_vptofh = (void *)eopnotsupp,
	.vfs_init = rumpfs_init,
	.vfs_reinit = NULL,
	.vfs_done = rumpfs_done,
	.vfs_mountroot = rumpfs_mountroot,
	.vfs_snapshot = (void *)eopnotsupp,
	.vfs_extattrctl = (void *)eopnotsupp,
	.vfs_suspendctl = (void *)eopnotsupp,
	.vfs_opv_descs = rump_opv_descs,
	/* vfs_refcount */
	/* vfs_list */
};

int
rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
{

	return EOPNOTSUPP;
}

int
rumpfs_unmount(struct mount *mp, int flags)
{

	return EOPNOTSUPP; /* ;) */
}

int
rumpfs_root(struct mount *mp, struct vnode **vpp)
{
	struct rumpfs_mount *rfsmp = mp->mnt_data;

	vget(rfsmp->rfsmp_rvp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = rfsmp->rfsmp_rvp;
	return 0;
}

int
rumpfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return EOPNOTSUPP;
}

void
rumpfs_init()
{

	CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);

	mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);
}

void
rumpfs_done()
{

	mutex_destroy(&reclock);
	mutex_destroy(&etfs_lock);
}

int
rumpfs_mountroot()
{
	struct mount *mp;
	struct rumpfs_mount *rfsmp;
	struct rumpfs_node *rn;
	int error;

	if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
		vrele(rootvp);
		return error;
	}

	rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);

	rn = makeprivate(VDIR, NODEV, DEV_BSIZE);
	mutex_enter(&reclock);
	error = makevnode(mp, rn, &rfsmp->rfsmp_rvp);
	mutex_exit(&reclock);
	if (error)
		panic("could not create root vnode: %d", error);
	rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;
	VOP_UNLOCK(rfsmp->rfsmp_rvp, 0);

	mutex_enter(&mountlist_lock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mutex_exit(&mountlist_lock);

	mp->mnt_data = rfsmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	vfs_getnewfsid(mp);

	error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error)
		panic("set statvfsinfo for rootfs failed");

	vfs_unbusy(mp, false, NULL);

	return 0;
}

MODULE(MODULE_CLASS_VFS, rumpfs, NULL);

static int
rumpfs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&rumpfs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&rumpfs_vfsops);
	default:
		return ENOTTY;
	}
}