rumpfs.c revision 1.73 1 /* $NetBSD: rumpfs.c,v 1.73 2010/11/11 18:45:09 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2009, 2010 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.73 2010/11/11 18:45:09 pooka Exp $");
30
31 #include <sys/param.h>
32 #include <sys/atomic.h>
33 #include <sys/buf.h>
34 #include <sys/dirent.h>
35 #include <sys/errno.h>
36 #include <sys/filedesc.h>
37 #include <sys/fcntl.h>
38 #include <sys/kauth.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/lock.h>
44 #include <sys/lockf.h>
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 #include <sys/syscallargs.h>
48 #include <sys/vnode.h>
49 #include <sys/unistd.h>
50
51 #include <miscfs/fifofs/fifo.h>
52 #include <miscfs/specfs/specdev.h>
53 #include <miscfs/genfs/genfs.h>
54 #include <miscfs/genfs/genfs_node.h>
55
56 #include <uvm/uvm_extern.h>
57
58 #include <rump/rumpuser.h>
59
60 #include "rump_private.h"
61 #include "rump_vfs_private.h"
62
63 static int rump_vop_lookup(void *);
64 static int rump_vop_getattr(void *);
65 static int rump_vop_mkdir(void *);
66 static int rump_vop_rmdir(void *);
67 static int rump_vop_remove(void *);
68 static int rump_vop_mknod(void *);
69 static int rump_vop_create(void *);
70 static int rump_vop_inactive(void *);
71 static int rump_vop_reclaim(void *);
72 static int rump_vop_success(void *);
73 static int rump_vop_readdir(void *);
74 static int rump_vop_spec(void *);
75 static int rump_vop_read(void *);
76 static int rump_vop_write(void *);
77 static int rump_vop_open(void *);
78 static int rump_vop_symlink(void *);
79 static int rump_vop_readlink(void *);
80 static int rump_vop_whiteout(void *);
81 static int rump_vop_pathconf(void *);
82 static int rump_vop_bmap(void *);
83 static int rump_vop_strategy(void *);
84
/*
 * Minimal stand-in fifofs op vector: every operation falls through to
 * vn_default_error.  NOTE(review): presumably defined here so rumpfs
 * links without the real fifofs — confirm against the rump build glue.
 */
int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };
92
/*
 * The rumpfs vnode operations vector.  Operations not implemented
 * locally are handled by genfs helpers; access and fsync always
 * succeed (rump_vop_success); link is not supported.
 */
int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_rmdir_desc, rump_vop_rmdir },
	{ &vop_remove_desc, rump_vop_remove },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_symlink_desc, rump_vop_symlink },
	{ &vop_readlink_desc, rump_vop_readlink },
	{ &vop_access_desc, rump_vop_success },		/* no permission checks */
	{ &vop_readdir_desc, rump_vop_readdir },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_close_desc, genfs_nullop },
	{ &vop_seek_desc, genfs_seek },
	{ &vop_getpages_desc, genfs_getpages },
	{ &vop_putpages_desc, genfs_putpages },
	{ &vop_whiteout_desc, rump_vop_whiteout },
	{ &vop_fsync_desc, rump_vop_success },		/* nothing to sync */
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_islocked_desc, genfs_islocked },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ &vop_link_desc, genfs_eopnotsupp },		/* hard links unsupported */
	{ &vop_pathconf_desc, rump_vop_pathconf },
	{ &vop_bmap_desc, rump_vop_bmap },
	{ &vop_strategy_desc, rump_vop_strategy },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };
129
/*
 * Op vector for device (VCHR/VBLK) nodes: every operation goes through
 * rump_vop_spec() (defined elsewhere; declared above).
 */
int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };
137
/* NULL-terminated list of all op vectors rumpfs registers with VFS. */
const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};
143
/*
 * Directory entry.  A NULL rd_node marks a whiteout entry (see
 * rump_vop_whiteout / rump_vop_readdir).
 */
#define RUMPFS_WHITEOUT NULL
#define RDENT_ISWHITEOUT(rdp) (rdp->rd_node == RUMPFS_WHITEOUT)
struct rumpfs_dent {
	char *rd_name;			/* kmem_alloc'd, NUL-terminated */
	int rd_namelen;			/* strlen(rd_name) */
	struct rumpfs_node *rd_node;	/* target node, or RUMPFS_WHITEOUT */

	LIST_ENTRY(rumpfs_dent) rd_entries;
};
153
/* genfs glue for the page cache: stock size/write handlers suffice. */
struct genfs_ops rumpfs_genfsops = {
	.gop_size = genfs_size,
	.gop_write = genfs_gop_write,

	/* optional */
	.gop_alloc = NULL,
	.gop_markupdate = NULL,
};
162
/*
 * In-memory inode.  The type-dependent part lives in rn_u; which arm
 * is valid follows rn_va.va_type (and, for VREG, whether the node is
 * an etfs node phoning the host or a plain in-memory file).
 */
struct rumpfs_node {
	struct genfs_node rn_gn;	/* must be first for genfs */
	struct vattr rn_va;		/* attributes, kept up to date here */
	struct vnode *rn_vp;		/* associated vnode, under reclock */
	char *rn_hostpath;		/* host path for etfs nodes (M_TEMP) */
	int rn_flags;			/* RUMPNODE_* below */

	union {
		struct { /* VREG */
			int readfd;
			int writefd;
			uint64_t offset;
		} reg;
		struct {
			/* VREG without et: plain in-memory backing store */
			void *data;
			size_t dlen;
		} reg_noet;
		struct { /* VDIR */
			LIST_HEAD(, rumpfs_dent) dents;
			struct rumpfs_node *parent;
			int flags;
		} dir;
		struct {
			/* VLNK */
			char *target;
			size_t len;
		} link;
	} rn_u;
};
#define rn_readfd	rn_u.reg.readfd
#define rn_writefd	rn_u.reg.writefd
#define rn_offset	rn_u.reg.offset
#define rn_data		rn_u.reg_noet.data
#define rn_dlen		rn_u.reg_noet.dlen
#define rn_dir		rn_u.dir.dents
#define rn_parent	rn_u.dir.parent
#define rn_linktarg	rn_u.link.target
#define rn_linklen	rn_u.link.len

#define RUMPNODE_CANRECLAIM	0x01	/* ok to recycle node at reclaim */
#define RUMPNODE_DIR_ET		0x02	/* dir contents come from host */
#define RUMPNODE_DIR_ETSUBS	0x04	/* subdirs are also et dirs */
#define RUMPNODE_ET_PHONE_HOST	0x10	/* I/O goes to a host file */

/* Per-mount data: just the root vnode. */
struct rumpfs_mount {
	struct vnode *rfsmp_rvp;
};
209
static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t, bool);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];	/* lookup key (NUL-terminated path) */
	size_t et_keylen;		/* strlen(et_key) */
	bool et_prefixkey;		/* match key as a path prefix (dirs) */
	bool et_removing;		/* removal in progress; reject dups */
	devminor_t et_blkmin;		/* rumpblk minor, or -1 if none */

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;	/* backing node */
};
/* etfs_lock protects etfs_list and et_removing. */
static kmutex_t etfs_lock;
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);
233
234 static enum vtype
235 ettype_to_vtype(enum rump_etfs_type et)
236 {
237 enum vtype vt;
238
239 switch (et) {
240 case RUMP_ETFS_REG:
241 vt = VREG;
242 break;
243 case RUMP_ETFS_BLK:
244 vt = VBLK;
245 break;
246 case RUMP_ETFS_CHR:
247 vt = VCHR;
248 break;
249 case RUMP_ETFS_DIR:
250 vt = VDIR;
251 break;
252 case RUMP_ETFS_DIR_SUBDIRS:
253 vt = VDIR;
254 break;
255 default:
256 panic("invalid et type: %d", et);
257 }
258
259 return vt;
260 }
261
262 static enum vtype
263 hft_to_vtype(int hft)
264 {
265 enum vtype vt;
266
267 switch (hft) {
268 case RUMPUSER_FT_OTHER:
269 vt = VNON;
270 break;
271 case RUMPUSER_FT_DIR:
272 vt = VDIR;
273 break;
274 case RUMPUSER_FT_REG:
275 vt = VREG;
276 break;
277 case RUMPUSER_FT_BLK:
278 vt = VBLK;
279 break;
280 case RUMPUSER_FT_CHR:
281 vt = VCHR;
282 break;
283 default:
284 vt = VNON;
285 break;
286 }
287
288 return vt;
289 }
290
291 static bool
292 etfs_find(const char *key, struct etfs **etp, bool forceprefix)
293 {
294 struct etfs *et;
295 size_t keylen = strlen(key);
296
297 KASSERT(mutex_owned(&etfs_lock));
298
299 LIST_FOREACH(et, &etfs_list, et_entries) {
300 if ((keylen == et->et_keylen || et->et_prefixkey || forceprefix)
301 && strncmp(key, et->et_key, et->et_keylen) == 0) {
302 if (etp)
303 *etp = et;
304 return true;
305 }
306 }
307
308 return false;
309 }
310
311 #define REGDIR(ftype) \
312 ((ftype) == RUMP_ETFS_DIR || (ftype) == RUMP_ETFS_DIR_SUBDIRS)
313 static int
314 doregister(const char *key, const char *hostpath,
315 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
316 {
317 char buf[9];
318 struct etfs *et;
319 struct rumpfs_node *rn;
320 uint64_t fsize;
321 dev_t rdev = NODEV;
322 devminor_t dmin = -1;
323 int hft, error;
324
325 if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
326 return error;
327
328 /* etfs directory requires a directory on the host */
329 if (REGDIR(ftype)) {
330 if (hft != RUMPUSER_FT_DIR)
331 return ENOTDIR;
332 if (begin != 0)
333 return EISDIR;
334 if (size != RUMP_ETFS_SIZE_ENDOFF)
335 return EISDIR;
336 size = fsize;
337 } else {
338 if (begin > fsize)
339 return EINVAL;
340 if (size == RUMP_ETFS_SIZE_ENDOFF)
341 size = fsize - begin;
342 if (begin + size > fsize)
343 return EINVAL;
344 }
345
346 if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
347 error = rumpblk_register(hostpath, &dmin, begin, size);
348 if (error != 0) {
349 return error;
350 }
351 rdev = makedev(RUMPBLK_DEVMAJOR, dmin);
352 }
353
354 et = kmem_alloc(sizeof(*et), KM_SLEEP);
355 strcpy(et->et_key, key);
356 et->et_keylen = strlen(et->et_key);
357 et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), rdev, size, true);
358 et->et_removing = false;
359 et->et_blkmin = dmin;
360
361 rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
362
363 if (ftype == RUMP_ETFS_REG || REGDIR(ftype) || et->et_blkmin != -1) {
364 size_t len = strlen(hostpath)+1;
365
366 rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
367 memcpy(rn->rn_hostpath, hostpath, len);
368 rn->rn_offset = begin;
369 }
370
371 if (REGDIR(ftype)) {
372 rn->rn_flags |= RUMPNODE_DIR_ET;
373 et->et_prefixkey = true;
374 } else {
375 et->et_prefixkey = false;
376 }
377
378 if (ftype == RUMP_ETFS_DIR_SUBDIRS)
379 rn->rn_flags |= RUMPNODE_DIR_ETSUBS;
380
381 mutex_enter(&etfs_lock);
382 if (etfs_find(key, NULL, REGDIR(ftype))) {
383 mutex_exit(&etfs_lock);
384 if (et->et_blkmin != -1)
385 rumpblk_deregister(hostpath);
386 if (et->et_rn->rn_hostpath != NULL)
387 free(et->et_rn->rn_hostpath, M_TEMP);
388 kmem_free(et->et_rn, sizeof(*et->et_rn));
389 kmem_free(et, sizeof(*et));
390 return EEXIST;
391 }
392 LIST_INSERT_HEAD(&etfs_list, et, et_entries);
393 mutex_exit(&etfs_lock);
394
395 if (ftype == RUMP_ETFS_BLK) {
396 format_bytes(buf, sizeof(buf), size);
397 aprint_verbose("%s: hostpath %s (%s)\n", key, hostpath, buf);
398 }
399
400 return 0;
401 }
402 #undef REGDIR
403
/* Public: register an etfs mapping covering the whole host file. */
int
rump_etfs_register(const char *key, const char *hostpath,
	enum rump_etfs_type ftype)
{

	return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
}
411
/* Public: register an etfs mapping covering [begin, begin+size). */
int
rump_etfs_register_withsize(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{

	return doregister(key, hostpath, ftype, begin, size);
}
419
/* remove etfs mapping. caller's responsibility to make sure it's not in use */
int
rump_etfs_remove(const char *key)
{
	struct etfs *et;
	size_t keylen = strlen(key);
	int rv;

	/*
	 * First pass under the lock: find the entry and claim it by
	 * setting et_removing, so concurrent removers get ENOENT.
	 * If the loop runs off the end, LIST_FOREACH leaves et == NULL.
	 */
	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			if (et->et_removing)
				et = NULL;	/* someone else got here first */
			else
				et->et_removing = true;
			break;
		}
	}
	mutex_exit(&etfs_lock);
	if (!et)
		return ENOENT;

	/*
	 * ok, we know what we want to remove and have signalled there
	 * actually are men at work. first, unregister from rumpblk
	 */
	if (et->et_blkmin != -1) {
		rv = rumpblk_deregister(et->et_rn->rn_hostpath);
	} else {
		rv = 0;
	}
	KASSERT(rv == 0);

	/* then do the actual removal */
	mutex_enter(&etfs_lock);
	LIST_REMOVE(et, et_entries);
	mutex_exit(&etfs_lock);

	/* node is unreachable, safe to nuke all device copies */
	if (et->et_blkmin != -1)
		vdevgone(RUMPBLK_DEVMAJOR, et->et_blkmin, et->et_blkmin, VBLK);

	/* free the node and the mapping; nothing points at them anymore */
	if (et->et_rn->rn_hostpath != NULL)
		free(et->et_rn->rn_hostpath, M_TEMP);
	kmem_free(et->et_rn, sizeof(*et->et_rn));
	kmem_free(et, sizeof(*et));

	return 0;
}
469
470 /*
471 * rumpfs
472 */
473
#define INO_WHITEOUT 1
/* inode number allocator; 1 is reserved for whiteouts, 2 is the root */
static int lastino = 2;
/* reclock serializes rn_vp access between lookup and reclaim */
static kmutex_t reclock;
477
478 static struct rumpfs_node *
479 makeprivate(enum vtype vt, dev_t rdev, off_t size, bool et)
480 {
481 struct rumpfs_node *rn;
482 struct vattr *va;
483 struct timespec ts;
484
485 rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);
486
487 switch (vt) {
488 case VDIR:
489 LIST_INIT(&rn->rn_dir);
490 break;
491 case VREG:
492 if (et) {
493 rn->rn_readfd = -1;
494 rn->rn_writefd = -1;
495 }
496 break;
497 default:
498 break;
499 }
500
501 nanotime(&ts);
502
503 va = &rn->rn_va;
504 va->va_type = vt;
505 va->va_mode = 0755;
506 if (vt == VDIR)
507 va->va_nlink = 2;
508 else
509 va->va_nlink = 1;
510 va->va_uid = 0;
511 va->va_gid = 0;
512 va->va_fsid =
513 va->va_fileid = atomic_inc_uint_nv(&lastino);
514 va->va_size = size;
515 va->va_blocksize = 512;
516 va->va_atime = ts;
517 va->va_mtime = ts;
518 va->va_ctime = ts;
519 va->va_birthtime = ts;
520 va->va_gen = 0;
521 va->va_flags = 0;
522 va->va_rdev = rdev;
523 va->va_bytes = 512;
524 va->va_filerev = 0;
525 va->va_vaflags = 0;
526
527 return rn;
528 }
529
/*
 * Create and initialize a vnode for the given node.  Device nodes get
 * the spec op vector, everything else the regular rumpfs one.  The
 * vnode is returned locked (LK_EXCLUSIVE) and rn->rn_vp is published
 * under reclock last, so a concurrent lookup never sees a half-baked
 * vnode.
 *
 * Returns 0 or the getnewvnode() error.
 */
static int
makevnode(struct mount *mp, struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	KASSERT(!mutex_owned(&reclock));

	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}

	rv = getnewvnode(VT_RUMP, mp, vpops, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	genfs_node_init(vp, &rumpfs_genfsops);
	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	mutex_enter(&reclock);
	rn->rn_vp = vp;
	mutex_exit(&reclock);

	*vpp = vp;

	return 0;
}
568
569
570 static void
571 makedir(struct rumpfs_node *rnd,
572 struct componentname *cnp, struct rumpfs_node *rn)
573 {
574 struct rumpfs_dent *rdent;
575
576 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
577 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
578 rdent->rd_node = rn;
579 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
580 rdent->rd_namelen = strlen(rdent->rd_name);
581
582 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
583 }
584
585 static void
586 freedir(struct rumpfs_node *rnd, struct componentname *cnp)
587 {
588 struct rumpfs_dent *rd = NULL;
589
590 LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
591 if (rd->rd_namelen == cnp->cn_namelen &&
592 strncmp(rd->rd_name, cnp->cn_nameptr,
593 cnp->cn_namelen) == 0)
594 break;
595 }
596 if (rd == NULL)
597 panic("could not find directory entry: %s", cnp->cn_nameptr);
598
599 LIST_REMOVE(rd, rd_entries);
600 kmem_free(rd->rd_name, rd->rd_namelen+1);
601 kmem_free(rd, sizeof(*rd));
602 }
603
/*
 * Simple lookup for rump file systems.
 *
 * uhm, this is twisted. C F C C, hope of C C F C looming
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *vp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	struct etfs *et;
	bool dotdot = (cnp->cn_flags & ISDOTDOT) != 0;
	int rv = 0;

	/* check for dot, return directly if the case */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	/* we don't do rename */
	if (!(((cnp->cn_flags & ISLASTCN) == 0) || (cnp->cn_nameiop != RENAME)))
		return EOPNOTSUPP;

	/*
	 * check for etfs: et keys are matched against the full
	 * remaining pathname, but only from the fs root.
	 */
	if (dvp == rootvnode && cnp->cn_nameiop == LOOKUP) {
		bool found;
		mutex_enter(&etfs_lock);
		found = etfs_find(cnp->cn_pnbuf, &et, false);
		mutex_exit(&etfs_lock);

		if (found) {
			char *offset;

			offset = strstr(cnp->cn_pnbuf, et->et_key);
			KASSERT(offset);

			rn = et->et_rn;
			/*
			 * consume the path components covered by the
			 * key beyond the current one, so namei skips
			 * straight past the whole key.
			 */
			cnp->cn_consume += et->et_keylen
			    - (cnp->cn_nameptr - offset) - cnp->cn_namelen;
			if (rn->rn_va.va_type != VDIR)
				cnp->cn_flags &= ~REQUIREDIR;
			goto getvnode;
		}
	}

	if (rnd->rn_flags & RUMPNODE_DIR_ET) {
		/* et directory: consult the host fs for the name */
		uint64_t fsize;
		char *newpath;
		size_t newpathlen;
		int hft, error;

		if (dotdot)
			return EOPNOTSUPP;

		newpathlen = strlen(rnd->rn_hostpath) + 1 + cnp->cn_namelen + 1;
		newpath = malloc(newpathlen, M_TEMP, M_WAITOK);

		strlcpy(newpath, rnd->rn_hostpath, newpathlen);
		strlcat(newpath, "/", newpathlen);
		strlcat(newpath, cnp->cn_nameptr, newpathlen);

		if (rumpuser_getfileinfo(newpath, &fsize, &hft, &error)) {
			free(newpath, M_TEMP);
			return error;
		}

		/* allow only dirs and regular files */
		if (hft != RUMPUSER_FT_REG && hft != RUMPUSER_FT_DIR) {
			free(newpath, M_TEMP);
			return ENOENT;
		}

		/*
		 * NOTE(review): a fresh node is created on every lookup
		 * of the same host file; presumably reclaim keeps this
		 * in check (RUMPNODE_CANRECLAIM) -- verify.
		 */
		rn = makeprivate(hft_to_vtype(hft), NODEV, fsize, true);
		rn->rn_flags |= RUMPNODE_CANRECLAIM;
		if (rnd->rn_flags & RUMPNODE_DIR_ETSUBS) {
			rn->rn_flags |= RUMPNODE_DIR_ET | RUMPNODE_DIR_ETSUBS;
			rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
		}
		rn->rn_hostpath = newpath;

		goto getvnode;
	} else {
		if (dotdot) {
			rn = rnd->rn_parent;
			goto getvnode;
		} else {
			/* plain directory: scan the in-memory entries */
			LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
				if (rd->rd_namelen == cnp->cn_namelen &&
				    strncmp(rd->rd_name, cnp->cn_nameptr,
				      cnp->cn_namelen) == 0)
					break;
			}
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	/* name not found, but creating: tell namei to go ahead */
	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		cnp->cn_flags |= SAVENAME;
		return EJUSTRETURN;
	}
	if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == DELETE)
		cnp->cn_flags |= SAVENAME;

	rn = rd->rd_node;

 getvnode:
	KASSERT(rn);
	/* for dotdot, drop the child lock while grabbing the parent */
	if (dotdot)
		VOP_UNLOCK(dvp);
	mutex_enter(&reclock);
	if ((vp = rn->rn_vp)) {
		/* existing vnode: take interlock, then vget; retry on race */
		mutex_enter(&vp->v_interlock);
		mutex_exit(&reclock);
		if (vget(vp, LK_EXCLUSIVE)) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
			goto getvnode;
		}
		*vpp = vp;
	} else {
		mutex_exit(&reclock);
		rv = makevnode(dvp->v_mount, rn, vpp);
	}
	if (dotdot)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	return rv;
}
744
745 static int
746 rump_vop_getattr(void *v)
747 {
748 struct vop_getattr_args /* {
749 struct vnode *a_vp;
750 struct vattr *a_vap;
751 kauth_cred_t a_cred;
752 } */ *ap = v;
753 struct rumpfs_node *rn = ap->a_vp->v_data;
754
755 memcpy(ap->a_vap, &rn->rn_va, sizeof(struct vattr));
756 return 0;
757 }
758
/*
 * Create a directory: build a node, attach a vnode and link the new
 * entry into the parent.  Returns the new vnode locked via *vpp.
 * dvp is consumed (vput) regardless of outcome, per VOP_MKDIR contract.
 */
static int
rump_vop_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv = 0;

	rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
	rn->rn_parent = rnd;
	/* NOTE(review): rn is not freed if makevnode fails -- leak? */
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	PNBUF_PUT(cnp->cn_pnbuf);
	vput(dvp);
	return rv;
}
787
/*
 * Remove a directory.  Only empty directories may go; the node itself
 * is freed later at reclaim time (RUMPNODE_CANRECLAIM).  Both dvp and
 * vp are consumed, per VOP_RMDIR contract.
 */
static int
rump_vop_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct rumpfs_node *rn = vp->v_data;
	int rv = 0;

	if (!LIST_EMPTY(&rn->rn_dir)) {
		rv = ENOTEMPTY;
		goto out;
	}

	freedir(rnd, cnp);
	rn->rn_flags |= RUMPNODE_CANRECLAIM;

 out:
	PNBUF_PUT(cnp->cn_pnbuf);
	vput(dvp);
	vput(vp);

	return rv;
}
818
/*
 * Remove a file.  Host-backed (et) nodes cannot be removed this way.
 * The in-memory backing store is freed here; the node itself waits
 * for reclaim.  Both dvp and vp are consumed.
 */
static int
rump_vop_remove(void *v)
{
	/*
	 * NOTE(review): declared with vop_rmdir_args; presumably the
	 * arg layouts are compatible with vop_remove_args -- confirm.
	 */
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct rumpfs_node *rn = vp->v_data;
	int rv = 0;

	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
		return EOPNOTSUPP;

	if (vp->v_type == VREG) {
		rump_hyperfree(rn->rn_data, rn->rn_dlen);
	}

	freedir(rnd, cnp);
	rn->rn_flags |= RUMPNODE_CANRECLAIM;

	PNBUF_PUT(cnp->cn_pnbuf);
	vput(dvp);
	vput(vp);

	return rv;
}
850
/*
 * Create a device node of the type/rdev given in the vattr and link
 * it into the parent directory.  dvp is consumed.
 */
static int
rump_vop_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv;

	rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE, false);
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	PNBUF_PUT(cnp->cn_pnbuf);
	vput(dvp);
	return rv;
}
879
/*
 * Create a regular file (or socket).  Regular files start out empty;
 * sockets get a token DEV_BSIZE size.  dvp is consumed.
 */
static int
rump_vop_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	off_t newsize;
	int rv;

	newsize = va->va_type == VSOCK ? DEV_BSIZE : 0;
	rn = makeprivate(va->va_type, NODEV, newsize, false);
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	PNBUF_PUT(cnp->cn_pnbuf);
	vput(dvp);
	return rv;
}
910
/*
 * Create a symbolic link.  The target string is copied into a
 * pathname buffer (PNBUF_GET, MAXPATHLEN) owned by the node.
 * dvp is consumed.
 */
static int
rump_vop_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	const char *target = ap->a_target;
	size_t linklen;
	int rv;

	linklen = strlen(target);
	KASSERT(linklen < MAXPATHLEN);
	rn = makeprivate(VLNK, NODEV, linklen, false);
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

	/* linklen < MAXPATHLEN, so strcpy into the pnbuf is in bounds */
	KASSERT(linklen < MAXPATHLEN);
	rn->rn_linktarg = PNBUF_GET();
	rn->rn_linklen = linklen;
	strcpy(rn->rn_linktarg, target);

 out:
	vput(dvp);
	return rv;
}
947
/* Copy the stored link target out to the caller's uio. */
static int
rump_vop_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;

	return uiomove(rn->rn_linktarg, rn->rn_linklen, uio);
}
962
963 static int
964 rump_vop_whiteout(void *v)
965 {
966 struct vop_whiteout_args /* {
967 struct vnode *a_dvp;
968 struct componentname *a_cnp;
969 int a_flags;
970 } */ *ap = v;
971 struct vnode *dvp = ap->a_dvp;
972 struct rumpfs_node *rnd = dvp->v_data;
973 struct componentname *cnp = ap->a_cnp;
974 int flags = ap->a_flags;
975
976 switch (flags) {
977 case LOOKUP:
978 break;
979 case CREATE:
980 makedir(rnd, cnp, RUMPFS_WHITEOUT);
981 break;
982 case DELETE:
983 cnp->cn_flags &= ~DOWHITEOUT; /* cargo culting never fails ? */
984 freedir(rnd, cnp);
985 break;
986 default:
987 panic("unknown whiteout op %d", flags);
988 }
989
990 return 0;
991 }
992
993 static int
994 rump_vop_open(void *v)
995 {
996 struct vop_open_args /* {
997 struct vnode *a_vp;
998 int a_mode;
999 kauth_cred_t a_cred;
1000 } */ *ap = v;
1001 struct vnode *vp = ap->a_vp;
1002 struct rumpfs_node *rn = vp->v_data;
1003 int mode = ap->a_mode;
1004 int error = EINVAL;
1005
1006 if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0)
1007 return 0;
1008
1009 if (mode & FREAD) {
1010 if (rn->rn_readfd != -1)
1011 return 0;
1012 rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
1013 O_RDONLY, &error);
1014 }
1015
1016 if (mode & FWRITE) {
1017 if (rn->rn_writefd != -1)
1018 return 0;
1019 rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
1020 O_WRONLY, &error);
1021 }
1022
1023 return error;
1024 }
1025
/* simple readdir. event omits dotstuff and periods */
static int
rump_vop_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct rumpfs_node *rnd = vp->v_data;
	struct rumpfs_dent *rdent;
	unsigned i;
	int rv = 0;

	/*
	 * seek to current entry: uio_offset is interpreted as an
	 * entry index into the list, not a byte offset.
	 */
	for (i = 0, rdent = LIST_FIRST(&rnd->rn_dir);
	    (i < uio->uio_offset) && rdent;
	    i++, rdent = LIST_NEXT(rdent, rd_entries))
		continue;
	if (!rdent)
		goto out;

	/* copy entries */
	for (; rdent && uio->uio_resid > 0;
	    rdent = LIST_NEXT(rdent, rd_entries), i++) {
		struct dirent dent;

		strlcpy(dent.d_name, rdent->rd_name, sizeof(dent.d_name));
		dent.d_namlen = strlen(dent.d_name);
		dent.d_reclen = _DIRENT_RECLEN(&dent, dent.d_namlen);

		/* whiteout entries get the reserved ino and DT_WHT */
		if (__predict_false(RDENT_ISWHITEOUT(rdent))) {
			dent.d_fileno = INO_WHITEOUT;
			dent.d_type = DT_WHT;
		} else {
			dent.d_fileno = rdent->rd_node->rn_va.va_fileid;
			dent.d_type = vtype2dt(rdent->rd_node->rn_va.va_type);
		}

		/* out of space: back off the index so this entry is retried */
		if (uio->uio_resid < dent.d_reclen) {
			i--;
			break;
		}

		rv = uiomove(&dent, dent.d_reclen, uio);
		if (rv) {
			i--;
			break;
		}
	}

 out:
	if (ap->a_cookies) {
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	}
	if (rdent)
		*ap->a_eofflag = 0;
	else
		*ap->a_eofflag = 1;
	uio->uio_offset = i;

	return rv;
}
1095
/*
 * Read from the host file backing an et node: pread at the uio offset
 * (shifted by the node's window offset) into a bounce buffer, then
 * uiomove the result out.
 */
static int
etread(struct rumpfs_node *rn, struct uio *uio)
{
	uint8_t *buf;
	size_t bufsize;
	ssize_t n;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	/* on pread failure, error was set by rumpuser; fall through to free */
	if ((n = rumpuser_pread(rn->rn_readfd, buf, bufsize,
	    uio->uio_offset + rn->rn_offset, &error)) == -1)
		goto out;
	KASSERT(n <= bufsize);
	error = uiomove(buf, n, uio);

 out:
	kmem_free(buf, bufsize);
	return error;

}
1117
1118 static int
1119 rump_vop_read(void *v)
1120 {
1121 struct vop_read_args /* {
1122 struct vnode *a_vp;
1123 struct uio *a_uio;
1124 int ioflags a_ioflag;
1125 kauth_cred_t a_cred;
1126 }; */ *ap = v;
1127 struct vnode *vp = ap->a_vp;
1128 struct rumpfs_node *rn = vp->v_data;
1129 struct uio *uio = ap->a_uio;
1130 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1131 off_t chunk;
1132 int error = 0;
1133
1134 /* et op? */
1135 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1136 return etread(rn, uio);
1137
1138 /* otherwise, it's off to ubc with us */
1139 while (uio->uio_resid > 0) {
1140 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1141 if (chunk == 0)
1142 break;
1143 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1144 UBC_READ | UBC_PARTIALOK | UBC_WANT_UNMAP(vp)?UBC_UNMAP:0);
1145 if (error)
1146 break;
1147 }
1148
1149 return error;
1150 }
1151
/*
 * Write to the host file backing an et node: gather the uio into a
 * bounce buffer, then pwrite it at the original offset (uiomove has
 * advanced uio_offset by bufsize, hence the subtraction).
 */
static int
etwrite(struct rumpfs_node *rn, struct uio *uio)
{
	uint8_t *buf;
	size_t bufsize;
	ssize_t n;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;
	KASSERT(uio->uio_resid == 0);
	n = rumpuser_pwrite(rn->rn_writefd, buf, bufsize,
	    (uio->uio_offset-bufsize) + rn->rn_offset, &error);
	if (n >= 0) {
		KASSERT(n <= bufsize);
		/*
		 * NOTE(review): on a short write, resid is restored but
		 * uio_offset is not rewound to match -- verify callers
		 * tolerate this.
		 */
		uio->uio_resid = bufsize - n;
	}

 out:
	kmem_free(buf, bufsize);
	return error;
}
1177
1178 static int
1179 rump_vop_write(void *v)
1180 {
1181 struct vop_read_args /* {
1182 struct vnode *a_vp;
1183 struct uio *a_uio;
1184 int ioflags a_ioflag;
1185 kauth_cred_t a_cred;
1186 }; */ *ap = v;
1187 struct vnode *vp = ap->a_vp;
1188 struct rumpfs_node *rn = vp->v_data;
1189 struct uio *uio = ap->a_uio;
1190 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1191 void *olddata;
1192 size_t oldlen, newlen;
1193 off_t chunk;
1194 int error = 0;
1195 bool allocd = false;
1196
1197 /* consult et? */
1198 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1199 return etwrite(rn, uio);
1200
1201 /*
1202 * Otherwise, it's a case of ubcmove.
1203 */
1204
1205 /*
1206 * First, make sure we have enough storage.
1207 *
1208 * No, you don't need to tell me it's not very efficient.
1209 * No, it doesn't really support sparse files, just fakes it.
1210 */
1211 newlen = uio->uio_offset + uio->uio_resid;
1212 oldlen = 0; /* XXXgcc */
1213 olddata = NULL;
1214 if (rn->rn_dlen < newlen) {
1215 oldlen = rn->rn_dlen;
1216 olddata = rn->rn_data;
1217
1218 rn->rn_data = rump_hypermalloc(newlen, 0, true, "rumpfs");
1219 rn->rn_dlen = newlen;
1220 memset(rn->rn_data, 0, newlen);
1221 memcpy(rn->rn_data, olddata, oldlen);
1222 allocd = true;
1223 uvm_vnp_setsize(vp, newlen);
1224 }
1225
1226 /* ok, we have enough stooorage. write */
1227 while (uio->uio_resid > 0) {
1228 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1229 if (chunk == 0)
1230 break;
1231 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1232 UBC_WRITE | UBC_PARTIALOK | UBC_WANT_UNMAP(vp)?UBC_UNMAP:0);
1233 if (error)
1234 break;
1235 }
1236
1237 if (allocd) {
1238 if (error) {
1239 rump_hyperfree(rn->rn_data, newlen);
1240 rn->rn_data = olddata;
1241 rn->rn_dlen = oldlen;
1242 uvm_vnp_setsize(vp, oldlen);
1243 } else {
1244 rump_hyperfree(olddata, oldlen);
1245 }
1246 }
1247
1248 return error;
1249 }
1250
1251 static int
1252 rump_vop_bmap(void *v)
1253 {
1254 struct vop_bmap_args /* {
1255 struct vnode *a_vp;
1256 daddr_t a_bn;
1257 struct vnode **a_vpp;
1258 daddr_t *a_bnp;
1259 int *a_runp;
1260 } */ *ap = v;
1261
1262 /* 1:1 mapping */
1263 if (ap->a_vpp)
1264 *ap->a_vpp = ap->a_vp;
1265 if (ap->a_bnp)
1266 *ap->a_bnp = ap->a_bn;
1267 if (ap->a_runp)
1268 *ap->a_runp = 16;
1269
1270 return 0;
1271 }
1272
/*
 * VOP_STRATEGY: satisfy buffer I/O directly from the node's in-memory
 * backing store (rn_data).  Only plain in-memory regular files are
 * handled; etfs host-backed nodes and non-regular vnodes get EINVAL.
 * The buf is always completed via biodone(), with any error reported
 * through bp->b_error.
 */
static int
rump_vop_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct buf *bp = ap->a_bp;
	off_t copylen, copyoff;
	int error;

	if (vp->v_type != VREG || rn->rn_flags & RUMPNODE_ET_PHONE_HOST) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Translate the block address into a byte offset into rn_data.
	 * NOTE(review): if b_blkno points past rn_dlen, copylen goes
	 * negative here — presumably callers (the page cache) only issue
	 * requests within the size set via uvm_vnp_setsize(); confirm.
	 */
	copyoff = bp->b_blkno << DEV_BSHIFT;
	copylen = MIN(rn->rn_dlen - copyoff, bp->b_bcount);
	if (BUF_ISWRITE(bp)) {
		memcpy((uint8_t *)rn->rn_data + copyoff, bp->b_data, copylen);
	} else {
		/* zero-fill the tail of a read past the end of data */
		memset((uint8_t*)bp->b_data + copylen, 0, bp->b_bcount-copylen);
		memcpy(bp->b_data, (uint8_t *)rn->rn_data + copyoff, copylen);
	}
	bp->b_resid = 0;
	error = 0;

 out:
	bp->b_error = error;
	biodone(bp);
	return 0;
}
1307
1308 static int
1309 rump_vop_pathconf(void *v)
1310 {
1311 struct vop_pathconf_args /* {
1312 struct vnode *a_vp;
1313 int a_name;
1314 register_t *a_retval;
1315 }; */ *ap = v;
1316 int name = ap->a_name;
1317 register_t *retval = ap->a_retval;
1318
1319 switch (name) {
1320 case _PC_LINK_MAX:
1321 *retval = LINK_MAX;
1322 return 0;
1323 case _PC_NAME_MAX:
1324 *retval = NAME_MAX;
1325 return 0;
1326 case _PC_PATH_MAX:
1327 *retval = PATH_MAX;
1328 return 0;
1329 case _PC_PIPE_BUF:
1330 *retval = PIPE_BUF;
1331 return 0;
1332 case _PC_CHOWN_RESTRICTED:
1333 *retval = 1;
1334 return 0;
1335 case _PC_NO_TRUNC:
1336 *retval = 1;
1337 return 0;
1338 case _PC_SYNC_IO:
1339 *retval = 1;
1340 return 0;
1341 case _PC_FILESIZEBITS:
1342 *retval = 43; /* this one goes to 11 */
1343 return 0;
1344 case _PC_SYMLINK_MAX:
1345 *retval = MAXPATHLEN;
1346 return 0;
1347 case _PC_2_SYMLINKS:
1348 *retval = 1;
1349 return 0;
1350 default:
1351 return EINVAL;
1352 }
1353 }
1354
/*
 * Generic no-op vnode operation: unconditionally report success.
 * Used as the implementation for several VOPs that need no work here.
 */
static int
rump_vop_success(void *v)
{
	/* nothing to do */
	return 0;
}
1361
/*
 * VOP_INACTIVE: last reference to the vnode was dropped.  For host-backed
 * (etfs) regular files, close any cached host file descriptors so the
 * host-side resources are released while the node lingers.  Tells the
 * caller to recycle the vnode only for reclaimable (non-etfs) nodes.
 */
static int
rump_vop_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int error;

	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST && vp->v_type == VREG) {
		/* best-effort close: the hypercall error is ignored */
		if (rn->rn_readfd != -1) {
			rumpuser_close(rn->rn_readfd, &error);
			rn->rn_readfd = -1;
		}
		if (rn->rn_writefd != -1) {
			rumpuser_close(rn->rn_writefd, &error);
			rn->rn_writefd = -1;
		}
	}
	*ap->a_recycle = (rn->rn_flags & RUMPNODE_CANRECLAIM) ? true : false;

	VOP_UNLOCK(vp);
	return 0;
}
1388
/*
 * VOP_RECLAIM: detach the rumpfs node from the dying vnode.  The
 * rn_vp backpointer is cleared under reclock so concurrent lookups
 * racing with reclaim see a consistent state.  The node itself is
 * freed only if marked RUMPNODE_CANRECLAIM; etfs registrations keep
 * their node alive for later re-lookup.
 */
static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	genfs_node_destroy(vp);
	vp->v_data = NULL;

	if (rn->rn_flags & RUMPNODE_CANRECLAIM) {
		/* symlink targets live in a pathname buffer */
		if (vp->v_type == VLNK)
			PNBUF_PUT(rn->rn_linktarg);
		if (rn->rn_hostpath)
			free(rn->rn_hostpath, M_TEMP);
		kmem_free(rn, sizeof(*rn));
	}

	return 0;
}
1414
1415 static int
1416 rump_vop_spec(void *v)
1417 {
1418 struct vop_generic_args *ap = v;
1419 int (**opvec)(void *);
1420
1421 switch (ap->a_desc->vdesc_offset) {
1422 case VOP_ACCESS_DESCOFFSET:
1423 case VOP_GETATTR_DESCOFFSET:
1424 case VOP_LOCK_DESCOFFSET:
1425 case VOP_UNLOCK_DESCOFFSET:
1426 case VOP_RECLAIM_DESCOFFSET:
1427 opvec = rump_vnodeop_p;
1428 break;
1429 default:
1430 opvec = spec_vnodeop_p;
1431 break;
1432 }
1433
1434 return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
1435 }
1436
1437 /*
1438 * Begin vfs-level stuff
1439 */
1440
VFS_PROTOS(rumpfs);
/*
 * VFS operations vector for rumpfs.  Unsupported operations point at
 * eopnotsupp; no-ops (start, sync) at nullop.
 */
struct vfsops rumpfs_vfsops = {
	.vfs_name =		MOUNT_RUMPFS,
	.vfs_min_mount_data = 	0,
	.vfs_mount =		rumpfs_mount,
	.vfs_start =		(void *)nullop,
	.vfs_unmount =		rumpfs_unmount,
	.vfs_root =		rumpfs_root,
	.vfs_quotactl =		(void *)eopnotsupp,
	.vfs_statvfs =		genfs_statvfs,
	.vfs_sync =		(void *)nullop,
	.vfs_vget =		rumpfs_vget,
	.vfs_fhtovp =		(void *)eopnotsupp,
	.vfs_vptofh =		(void *)eopnotsupp,
	.vfs_init =		rumpfs_init,
	.vfs_reinit =		NULL,
	.vfs_done =		rumpfs_done,
	.vfs_mountroot =	rumpfs_mountroot,
	.vfs_snapshot =		(void *)eopnotsupp,
	.vfs_extattrctl =	(void *)eopnotsupp,
	.vfs_suspendctl =	(void *)eopnotsupp,
	.vfs_renamelock_enter =	genfs_renamelock_enter,
	.vfs_renamelock_exit =	genfs_renamelock_exit,
	.vfs_opv_descs =	rump_opv_descs,
	/* vfs_refcount */
	/* vfs_list */
};
1468
1469 static int
1470 rumpfs_mountfs(struct mount *mp)
1471 {
1472 struct rumpfs_mount *rfsmp;
1473 struct rumpfs_node *rn;
1474 int error;
1475
1476 rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);
1477
1478 rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
1479 rn->rn_parent = rn;
1480 if ((error = makevnode(mp, rn, &rfsmp->rfsmp_rvp)) != 0)
1481 return error;
1482
1483 rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;
1484 VOP_UNLOCK(rfsmp->rfsmp_rvp);
1485
1486 mp->mnt_data = rfsmp;
1487 mp->mnt_stat.f_namemax = MAXNAMLEN;
1488 mp->mnt_stat.f_iosize = 512;
1489 mp->mnt_flag |= MNT_LOCAL;
1490 mp->mnt_iflag |= IMNT_MPSAFE;
1491 mp->mnt_fs_bshift = DEV_BSHIFT;
1492 vfs_getnewfsid(mp);
1493
1494 return 0;
1495 }
1496
1497 int
1498 rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
1499 {
1500 int error;
1501
1502 error = set_statvfs_info(mntpath, UIO_USERSPACE, "rumpfs", UIO_SYSSPACE,
1503 mp->mnt_op->vfs_name, mp, curlwp);
1504 if (error)
1505 return error;
1506
1507 return rumpfs_mountfs(mp);
1508 }
1509
1510 int
1511 rumpfs_unmount(struct mount *mp, int mntflags)
1512 {
1513 struct rumpfs_mount *rfsmp = mp->mnt_data;
1514 int flags = 0, error;
1515
1516 if (panicstr || mntflags & MNT_FORCE)
1517 flags |= FORCECLOSE;
1518
1519 if ((error = vflush(mp, rfsmp->rfsmp_rvp, flags)) != 0)
1520 return error;
1521 vgone(rfsmp->rfsmp_rvp); /* XXX */
1522
1523 kmem_free(rfsmp, sizeof(*rfsmp));
1524
1525 return 0;
1526 }
1527
1528 int
1529 rumpfs_root(struct mount *mp, struct vnode **vpp)
1530 {
1531 struct rumpfs_mount *rfsmp = mp->mnt_data;
1532
1533 vref(rfsmp->rfsmp_rvp);
1534 vn_lock(rfsmp->rfsmp_rvp, LK_EXCLUSIVE | LK_RETRY);
1535 *vpp = rfsmp->rfsmp_rvp;
1536 return 0;
1537 }
1538
/*
 * VFS_VGET: rumpfs does not support looking up vnodes by inode number.
 */
int
rumpfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return EOPNOTSUPP;
}
1545
/*
 * VFS_INIT: one-time filesystem initialization.  The compile-time
 * assertion ties the etfs "size is end-of-file" sentinel to the
 * rumpblk "size not set" sentinel, which the code relies on matching.
 */
void
rumpfs_init()
{

	CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);

	mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);
}
1555
/*
 * VFS_DONE: tear down what rumpfs_init() set up.
 */
void
rumpfs_done()
{

	mutex_destroy(&reclock);
	mutex_destroy(&etfs_lock);
}
1563
/*
 * VFS_MOUNTROOT: mount rumpfs as the root filesystem.  Allocates the
 * root mount, mounts it (panicking on failure, since there is no way
 * to run without a root), inserts it into the mount list and fills in
 * its statvfs information.
 */
int
rumpfs_mountroot()
{
	struct mount *mp;
	int error;

	if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
		/* drop the root device reference taken by the caller */
		vrele(rootvp);
		return error;
	}

	if ((error = rumpfs_mountfs(mp)) != 0)
		panic("mounting rootfs failed: %d", error);

	mutex_enter(&mountlist_lock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mutex_exit(&mountlist_lock);

	error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error)
		panic("set_statvfs_info failed for rootfs: %d", error);

	/* release the busy reference from vfs_rootmountalloc() */
	vfs_unbusy(mp, false, NULL);

	return 0;
}
1591