/*	$NetBSD: rumpfs.c,v 1.162 2020/05/16 18:31:52 christos Exp $	*/

/*
 * Copyright (c) 2009, 2010, 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.162 2020/05/16 18:31:52 christos Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/syscallargs.h>
#include <sys/vnode.h>
#include <sys/fstrans.h>
#include <sys/unistd.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>

#include <uvm/uvm_extern.h>

#include <rump-sys/kern.h>
#include <rump-sys/vfs.h>

#include <rump/rumpfs.h>
#include <rump/rumpuser.h>

static int rump_vop_lookup(void *);
static int rump_vop_getattr(void *);
static int rump_vop_setattr(void *);
static int rump_vop_mkdir(void *);
static int rump_vop_rmdir(void *);
static int rump_vop_remove(void *);
static int rump_vop_mknod(void *);
static int rump_vop_create(void *);
static int rump_vop_inactive(void *);
static int rump_vop_reclaim(void *);
static int rump_vop_success(void *);
static int rump_vop_readdir(void *);
static int rump_vop_spec(void *);
static int rump_vop_read(void *);
static int rump_vop_write(void *);
static int rump_vop_open(void *);
static int rump_vop_symlink(void *);
static int rump_vop_readlink(void *);
static int rump_vop_whiteout(void *);
static int rump_vop_pathconf(void *);
static int rump_vop_bmap(void *);
static int rump_vop_strategy(void *);
static int rump_vop_advlock(void *);
static int rump_vop_access(void *);
static int rump_vop_fcntl(void *);

int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_setattr_desc, rump_vop_setattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_rmdir_desc, rump_vop_rmdir },
	{ &vop_remove_desc, rump_vop_remove },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_symlink_desc, rump_vop_symlink },
	{ &vop_readlink_desc, rump_vop_readlink },
	{ &vop_access_desc, rump_vop_access },
	{ &vop_accessx_desc, genfs_accessx },
	{ &vop_readdir_desc, rump_vop_readdir },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_close_desc, genfs_nullop },
	{ &vop_seek_desc, genfs_seek },
	{ &vop_getpages_desc, genfs_getpages },
	{ &vop_putpages_desc, genfs_putpages },
	{ &vop_whiteout_desc, rump_vop_whiteout },
	{ &vop_fsync_desc, rump_vop_success },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_islocked_desc, genfs_islocked },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ &vop_link_desc, genfs_eopnotsupp },
	{ &vop_pathconf_desc, rump_vop_pathconf },
	{ &vop_bmap_desc, rump_vop_bmap },
	{ &vop_strategy_desc, rump_vop_strategy },
	{ &vop_advlock_desc, rump_vop_advlock },
	{ &vop_fcntl_desc, rump_vop_fcntl },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };

int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };

const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};

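/*
 * Directory contents are kept as a simple linked list of rumpfs_dent
 * structures.  A whiteout is represented by pointing rd_node at the
 * RUMPFS_WHITEOUT sentinel instead of at a real node.
 */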
#define RUMPFS_WHITEOUT ((void *)-1)
#define RDENT_ISWHITEOUT(rdp) (rdp->rd_node == RUMPFS_WHITEOUT)
struct rumpfs_dent {
	char *rd_name;
	int rd_namelen;
	struct rumpfs_node *rd_node;

	LIST_ENTRY(rumpfs_dent) rd_entries;
};

struct genfs_ops rumpfs_genfsops = {
	.gop_size = genfs_size,
	.gop_write = genfs_gop_write,
	.gop_putrange = genfs_gop_putrange,

	/* optional */
	.gop_alloc = NULL,
	.gop_markupdate = NULL,
};

struct rumpfs_node {
	struct genfs_node rn_gn;
	struct vattr rn_va;
	struct vnode *rn_vp;
	char *rn_hostpath;
	int rn_flags;
	struct lockf *rn_lockf;

	union {
		struct { /* VREG */
			int readfd;
			int writefd;
			uint64_t offset;
		} reg;
		struct {
			void *data;
			size_t dlen;
		} reg_noet;
		struct { /* VDIR */
			LIST_HEAD(, rumpfs_dent) dents;
			struct rumpfs_node *parent;
			int flags;
		} dir;
		struct {
			char *target;
			size_t len;
		} link;
	} rn_u;
};
#define rn_readfd	rn_u.reg.readfd
#define rn_writefd	rn_u.reg.writefd
#define rn_offset	rn_u.reg.offset
#define rn_data		rn_u.reg_noet.data
#define rn_dlen		rn_u.reg_noet.dlen
#define rn_dir		rn_u.dir.dents
#define rn_parent	rn_u.dir.parent
#define rn_linktarg	rn_u.link.target
#define rn_linklen	rn_u.link.len

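/*
 * rn_flags bits: CANRECLAIM marks nodes whose private data may be freed
 * at reclaim time, DIR_ET/DIR_ETSUBS mark etfs directories mapped to the
 * host (optionally including subdirectories), ET_PHONE_HOST routes file
 * I/O to the host file, and EXTSTORAGE means the file data is externally
 * owned memory which rumpfs must not free itself.
 */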
#define RUMPNODE_CANRECLAIM	0x01
#define RUMPNODE_DIR_ET		0x02
#define RUMPNODE_DIR_ETSUBS	0x04
#define RUMPNODE_ET_PHONE_HOST	0x10
#define RUMPNODE_EXTSTORAGE	0x20

struct rumpfs_mount {
	struct vnode *rfsmp_rvp;
};

#define INO_WHITEOUT 1
static int lastino = 2;
static kmutex_t reclock;

#define RUMPFS_DEFAULTMODE 0755
static void freedir(struct rumpfs_node *, struct componentname *);
static struct rumpfs_node *makeprivate(enum vtype, mode_t, dev_t, off_t, bool);
static void freeprivate(struct rumpfs_node *);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];
	size_t et_keylen;
	bool et_prefixkey;
	bool et_removing;
	devminor_t et_blkmin;

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;
};
static kmutex_t etfs_lock;
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);

static enum vtype
ettype_to_vtype(enum rump_etfs_type et)
{
	enum vtype vt;

	switch (et) {
	case RUMP_ETFS_REG:
		vt = VREG;
		break;
	case RUMP_ETFS_BLK:
		vt = VBLK;
		break;
	case RUMP_ETFS_CHR:
		vt = VCHR;
		break;
	case RUMP_ETFS_DIR:
		vt = VDIR;
		break;
	case RUMP_ETFS_DIR_SUBDIRS:
		vt = VDIR;
		break;
	default:
		panic("invalid et type: %d", et);
	}

	return vt;
}

static enum vtype
hft_to_vtype(int hft)
{
	enum vtype vt;

	switch (hft) {
	case RUMPUSER_FT_OTHER:
		vt = VNON;
		break;
	case RUMPUSER_FT_DIR:
		vt = VDIR;
		break;
	case RUMPUSER_FT_REG:
		vt = VREG;
		break;
	case RUMPUSER_FT_BLK:
		vt = VBLK;
		break;
	case RUMPUSER_FT_CHR:
		vt = VCHR;
		break;
	default:
		vt = VNON;
		break;
	}

	return vt;
}

static bool
etfs_find(const char *key, struct etfs **etp, bool forceprefix)
{
	struct etfs *et;
	size_t keylen = strlen(key);

	KASSERT(mutex_owned(&etfs_lock));

	LIST_FOREACH(et, &etfs_list, et_entries) {
		if ((keylen == et->et_keylen || et->et_prefixkey || forceprefix)
		    && strncmp(key, et->et_key, et->et_keylen) == 0) {
			if (etp)
				*etp = et;
			return true;
		}
	}

	return false;
}

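/*
 * Register a host file or directory under the given rumpfs key.
 * The public etfs interface (rump_pub_etfs_register() and friends,
 * assuming the usual rump naming convention) lands here through the
 * rump__etfs_register hook installed in rumpfs_init().  Block and
 * character devices are additionally registered with rumpblk.
 */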
#define REGDIR(ftype) \
    ((ftype) == RUMP_ETFS_DIR || (ftype) == RUMP_ETFS_DIR_SUBDIRS)
static int
etfsregister(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{
	char buf[9];
	struct etfs *et;
	struct rumpfs_node *rn;
	uint64_t fsize;
	dev_t rdev = NODEV;
	devminor_t dmin = -1;
	int hft, error;

	if (key[0] != '/') {
		return EINVAL;
	}
	while (key[0] == '/') {
		key++;
	}

	if ((error = rumpuser_getfileinfo(hostpath, &fsize, &hft)) != 0)
		return error;

	/* etfs directory requires a directory on the host */
	if (REGDIR(ftype)) {
		if (hft != RUMPUSER_FT_DIR)
			return ENOTDIR;
		if (begin != 0)
			return EISDIR;
		if (size != RUMP_ETFS_SIZE_ENDOFF)
			return EISDIR;
		size = fsize;
	} else {
		if (begin > fsize)
			return EINVAL;
		if (size == RUMP_ETFS_SIZE_ENDOFF)
			size = fsize - begin;
		if (begin + size > fsize)
			return EINVAL;
	}

	if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
		error = rumpblk_register(hostpath, &dmin, begin, size);
		if (error != 0) {
			return error;
		}
		rdev = makedev(RUMPBLK_DEVMAJOR, dmin);
	}

	et = kmem_alloc(sizeof(*et), KM_SLEEP);
	strcpy(et->et_key, key);
	et->et_keylen = strlen(et->et_key);
	et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), RUMPFS_DEFAULTMODE,
	    rdev, size, true);
	et->et_removing = false;
	et->et_blkmin = dmin;

	rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;

	if (ftype == RUMP_ETFS_REG || REGDIR(ftype) || et->et_blkmin != -1) {
		size_t len = strlen(hostpath)+1;

		rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
		memcpy(rn->rn_hostpath, hostpath, len);
		rn->rn_offset = begin;
	}

	if (REGDIR(ftype)) {
		rn->rn_flags |= RUMPNODE_DIR_ET;
		et->et_prefixkey = true;
	} else {
		et->et_prefixkey = false;
	}

	if (ftype == RUMP_ETFS_DIR_SUBDIRS)
		rn->rn_flags |= RUMPNODE_DIR_ETSUBS;

	mutex_enter(&etfs_lock);
	if (etfs_find(key, NULL, REGDIR(ftype))) {
		mutex_exit(&etfs_lock);
		if (et->et_blkmin != -1)
			rumpblk_deregister(hostpath);
		if (et->et_rn->rn_hostpath != NULL)
			free(et->et_rn->rn_hostpath, M_TEMP);
		freeprivate(et->et_rn);
		kmem_free(et, sizeof(*et));
		return EEXIST;
	}
	LIST_INSERT_HEAD(&etfs_list, et, et_entries);
	mutex_exit(&etfs_lock);

	if (ftype == RUMP_ETFS_BLK) {
		format_bytes(buf, sizeof(buf), size);
		aprint_verbose("/%s: hostpath %s (%s)\n", key, hostpath, buf);
	}

	return 0;
}
#undef REGDIR

/* remove etfs mapping. caller's responsibility to make sure it's not in use */
static int
etfsremove(const char *key)
{
	struct etfs *et;
	size_t keylen;
	int rv __diagused;

	if (key[0] != '/') {
		return EINVAL;
	}
	while (key[0] == '/') {
		key++;
	}

	keylen = strlen(key);

	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			if (et->et_removing)
				et = NULL;
			else
				et->et_removing = true;
			break;
		}
	}
	mutex_exit(&etfs_lock);
	if (!et)
		return ENOENT;

	/*
	 * ok, we know what we want to remove and have signalled there
	 * actually are men at work. first, unregister from rumpblk
	 */
	if (et->et_blkmin != -1) {
		rv = rumpblk_deregister(et->et_rn->rn_hostpath);
	} else {
		rv = 0;
	}
	KASSERT(rv == 0);

	/* then do the actual removal */
	mutex_enter(&etfs_lock);
	LIST_REMOVE(et, et_entries);
	mutex_exit(&etfs_lock);

	/* node is unreachable, safe to nuke all device copies */
	if (et->et_blkmin != -1) {
		vdevgone(RUMPBLK_DEVMAJOR, et->et_blkmin, et->et_blkmin, VBLK);
	} else {
		struct vnode *vp;
		struct mount *mp;
		struct rumpfs_node *rn;

		mutex_enter(&reclock);
		if ((vp = et->et_rn->rn_vp) != NULL) {
			mp = vp->v_mount;
			rn = vp->v_data;
			KASSERT(rn == et->et_rn);
		} else {
			mp = NULL;
		}
		mutex_exit(&reclock);
		if (mp && vcache_get(mp, &rn, sizeof(rn), &vp) == 0) {
			rv = vfs_suspend(mp, 0);
			KASSERT(rv == 0);
			vgone(vp);
			vfs_resume(mp);
		}
	}

	if (et->et_rn->rn_hostpath != NULL)
		free(et->et_rn->rn_hostpath, M_TEMP);
	freeprivate(et->et_rn);
	kmem_free(et, sizeof(*et));

	return 0;
}

/*
 * rumpfs
 */

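/*
 * Allocate a rumpfs_node and fill in default attributes.  Host file
 * descriptors for etfs regular files start out as -1 (not open).
 */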
static struct rumpfs_node *
makeprivate(enum vtype vt, mode_t mode, dev_t rdev, off_t size, bool et)
{
	struct rumpfs_node *rn;
	struct vattr *va;
	struct timespec ts;

	KASSERT((mode & ~ALLPERMS) == 0);
	rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);

	switch (vt) {
	case VDIR:
		LIST_INIT(&rn->rn_dir);
		break;
	case VREG:
		if (et) {
			rn->rn_readfd = -1;
			rn->rn_writefd = -1;
		}
		break;
	default:
		break;
	}

	nanotime(&ts);

	va = &rn->rn_va;
	va->va_type = vt;
	va->va_mode = mode;
	if (vt == VDIR)
		va->va_nlink = 2;
	else
		va->va_nlink = 1;
	va->va_uid = 0;
	va->va_gid = 0;
	va->va_fsid =
	    va->va_fileid = atomic_inc_uint_nv(&lastino);
	va->va_size = size;
	va->va_blocksize = 512;
	va->va_atime = ts;
	va->va_mtime = ts;
	va->va_ctime = ts;
	va->va_birthtime = ts;
	va->va_gen = 0;
	va->va_flags = 0;
	va->va_rdev = rdev;
	va->va_bytes = 512;
	va->va_filerev = 0;
	va->va_vaflags = 0;

	return rn;
}

static void
freeprivate(struct rumpfs_node *rn)
{

	kmem_free(rn, sizeof(*rn));
}

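/*
 * Directory entry bookkeeping: makedir() links a new entry into the
 * parent's list (first dropping a whiteout of the same name, if any),
 * and freedir() either removes an entry or turns it into a whiteout
 * when DOWHITEOUT is requested.
 */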
static void
makedir(struct rumpfs_node *rnd,
	struct componentname *cnp, struct rumpfs_node *rn)
{
	struct rumpfs_dent *rdent;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = rn;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
	rdent->rd_namelen = strlen(rdent->rd_name);

	if ((cnp->cn_flags & ISWHITEOUT) != 0) {
		KASSERT((cnp->cn_flags & DOWHITEOUT) == 0);
		freedir(rnd, cnp);
	}
	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
}

static void
freedir(struct rumpfs_node *rnd, struct componentname *cnp)
{
	struct rumpfs_dent *rd = NULL;

	LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
		if (rd->rd_namelen == cnp->cn_namelen &&
		    strncmp(rd->rd_name, cnp->cn_nameptr,
		      cnp->cn_namelen) == 0)
			break;
	}
	if (rd == NULL)
		panic("could not find directory entry: %s", cnp->cn_nameptr);

	if (cnp->cn_flags & DOWHITEOUT) {
		rd->rd_node = RUMPFS_WHITEOUT;
	} else {
		LIST_REMOVE(rd, rd_entries);
		kmem_free(rd->rd_name, rd->rd_namelen+1);
		kmem_free(rd, sizeof(*rd));
	}
}

#define RUMPFS_ACCESS 1
#define RUMPFS_MODIFY 2
#define RUMPFS_CHANGE 4

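/*
 * Update the timestamps selected by the RUMPFS_* flags above.
 * Refused on read-only mounts.
 */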
static int
rumpfs_update(int flags, struct vnode *vp, const struct timespec *acc,
	const struct timespec *mod, const struct timespec *chg)
{
	struct rumpfs_node *rn = vp->v_data;

	if (flags == 0)
		return 0;

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	if (flags & RUMPFS_ACCESS)
		rn->rn_va.va_atime = *acc;
	if (flags & RUMPFS_MODIFY)
		rn->rn_va.va_mtime = *mod;
	if (flags & RUMPFS_CHANGE)
		rn->rn_va.va_ctime = *chg;

	return 0;
}

/*
 * Simple lookup for rump file systems.
 *
 * uhm, this is twisted.  C F C C, hope of C C F C looming
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	struct etfs *et;
	bool dotdot = (cnp->cn_flags & ISDOTDOT) != 0;
	int rv = 0;
	const char *cp;

	*vpp = NULL;

	rv = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (rv)
		return rv;

	if ((cnp->cn_flags & ISLASTCN)
	    && (dvp->v_mount->mnt_flag & MNT_RDONLY)
	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return EROFS;

	/* check for dot, return directly if the case */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	/* we don't do rename */
	if (!(((cnp->cn_flags & ISLASTCN) == 0) || (cnp->cn_nameiop != RENAME)))
		return EOPNOTSUPP;

	/* check for etfs */
	if (dvp == rootvnode &&
	    (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)) {
		bool found;
		mutex_enter(&etfs_lock);
		found = etfs_find(cnp->cn_nameptr, &et, false);
		mutex_exit(&etfs_lock);

		if (found) {
			rn = et->et_rn;
			cnp->cn_consume += et->et_keylen - cnp->cn_namelen;
			/*
			 * consume trailing slashes if any and clear
			 * REQUIREDIR if we consumed the full path.
			 */
			cp = &cnp->cn_nameptr[cnp->cn_namelen];
			cp += cnp->cn_consume;
			KASSERT(*cp == '\0' || *cp == '/');
			if (*cp == '\0' && rn->rn_va.va_type != VDIR)
				cnp->cn_flags &= ~REQUIREDIR;
			while (*cp++ == '/')
				cnp->cn_consume++;
			goto getvnode;
		}
	}

	if (rnd->rn_flags & RUMPNODE_DIR_ET) {
		uint64_t fsize;
		char *newpath;
		size_t newpathlen;
		int hft, error;

		if (dotdot)
			return EOPNOTSUPP;

		newpathlen = strlen(rnd->rn_hostpath) + 1 + cnp->cn_namelen + 1;
		newpath = malloc(newpathlen, M_TEMP, M_WAITOK);

		strlcpy(newpath, rnd->rn_hostpath, newpathlen);
		strlcat(newpath, "/", newpathlen);
		strlcat(newpath, cnp->cn_nameptr, newpathlen);

		if ((error = rumpuser_getfileinfo(newpath, &fsize, &hft)) != 0){
			free(newpath, M_TEMP);
			return error;
		}

		/* allow only dirs and regular files */
		if (hft != RUMPUSER_FT_REG && hft != RUMPUSER_FT_DIR) {
			free(newpath, M_TEMP);
			return ENOENT;
		}

		rn = makeprivate(hft_to_vtype(hft), RUMPFS_DEFAULTMODE,
		    NODEV, fsize, true);
		rn->rn_flags |= RUMPNODE_CANRECLAIM;
		if (rnd->rn_flags & RUMPNODE_DIR_ETSUBS) {
			rn->rn_flags |= RUMPNODE_DIR_ET | RUMPNODE_DIR_ETSUBS;
			rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
		}
		rn->rn_hostpath = newpath;

		goto getvnode;
	} else {
		if (dotdot) {
			if ((rn = rnd->rn_parent) != NULL)
				goto getvnode;
		} else {
			LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
				if (rd->rd_namelen == cnp->cn_namelen &&
				    strncmp(rd->rd_name, cnp->cn_nameptr,
				      cnp->cn_namelen) == 0)
					break;
			}
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		if (dvp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (rv)
			return rv;
		return EJUSTRETURN;
	}

	if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == DELETE) {
		rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (rv)
			return rv;
	}

	if (RDENT_ISWHITEOUT(rd)) {
		cnp->cn_flags |= ISWHITEOUT;
		if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE)
			return EJUSTRETURN;
		return ENOENT;
	}

	rn = rd->rd_node;

 getvnode:
	KASSERT(rn);
	rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
	if (rv) {
		if (rnd->rn_flags & RUMPNODE_DIR_ET)
			freeprivate(rn);
		return rv;
	}

	return 0;
}

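/*
 * Access checks: rump_check_possible() enforces file-system level
 * restrictions (writes to a read-only mount), while
 * rump_check_permitted() defers the credential check to kauth/genfs.
 */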
static int
rump_check_possible(struct vnode *vp, struct rumpfs_node *rnode,
    mode_t mode)
{

	if ((mode & VWRITE) == 0)
		return 0;

	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		break;
	default:
		/* special file is always writable. */
		return 0;
	}

	return vp->v_mount->mnt_flag & MNT_RDONLY ? EROFS : 0;
}

static int
rump_check_permitted(struct vnode *vp, struct rumpfs_node *rnode,
    accmode_t accmode, kauth_cred_t cred)
{
	struct vattr *attr = &rnode->rn_va;

	return kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(accmode,
	    vp->v_type, attr->va_mode), vp, NULL, genfs_can_access(vp, cred,
	    attr->va_uid, attr->va_gid, attr->va_mode, NULL, accmode));
}

int
rump_vop_access(void *v)
{
	struct vop_access_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		accmode_t a_accmode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int error;

	error = rump_check_possible(vp, rn, ap->a_accmode);
	if (error)
		return error;

	error = rump_check_permitted(vp, rn, ap->a_accmode, ap->a_cred);

	return error;
}

static int
rump_vop_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct vattr *vap = ap->a_vap;

	memcpy(vap, &rn->rn_va, sizeof(struct vattr));
	vap->va_size = vp->v_size;
	return 0;
}

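/*
 * Setattr handles timestamps, flags, ownership and mode, and for
 * in-memory regular files also resizes the backing storage by
 * allocating a new buffer and copying the old contents over.
 */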
static int
rump_vop_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct rumpfs_node *rn = vp->v_data;
	struct vattr *attr = &rn->rn_va;
	struct timespec now;
	kauth_cred_t cred = ap->a_cred;
	int error;

#define CHANGED(a, t) (vap->a != (t)VNOVAL)
#define SETIFVAL(a,t) if (CHANGED(a, t)) rn->rn_va.a = vap->a
	if (CHANGED(va_atime.tv_sec, time_t) ||
	    CHANGED(va_ctime.tv_sec, time_t) ||
	    CHANGED(va_mtime.tv_sec, time_t) ||
	    CHANGED(va_birthtime.tv_sec, time_t) ||
	    CHANGED(va_atime.tv_nsec, long) ||
	    CHANGED(va_ctime.tv_nsec, long) ||
	    CHANGED(va_mtime.tv_nsec, long) ||
	    CHANGED(va_birthtime.tv_nsec, long)) {
		error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES, vp,
		    NULL, genfs_can_chtimes(vp, cred, attr->va_uid,
		    vap->va_vaflags));
		if (error)
			return error;
	}

	int flags = 0;
	getnanotime(&now);
	if (vap->va_atime.tv_sec != VNOVAL)
		if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
			flags |= RUMPFS_ACCESS;
	if (vap->va_mtime.tv_sec != VNOVAL) {
		flags |= RUMPFS_CHANGE | RUMPFS_MODIFY;
		if (vp->v_mount->mnt_flag & MNT_RELATIME)
			flags |= RUMPFS_ACCESS;
	} else if (vap->va_size == 0) {
		flags |= RUMPFS_MODIFY;
		vap->va_mtime = now;
	}
	SETIFVAL(va_birthtime.tv_sec, time_t);
	SETIFVAL(va_birthtime.tv_nsec, long);
	flags |= RUMPFS_CHANGE;
	error = rumpfs_update(flags, vp, &vap->va_atime, &vap->va_mtime, &now);
	if (error)
		return error;

	if (CHANGED(va_flags, u_long)) {
		/* XXX Can we handle system flags here...? */
		error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_FLAGS, vp,
		    NULL, genfs_can_chflags(vp, cred, attr->va_uid, false));
		if (error)
			return error;
	}

	SETIFVAL(va_flags, u_long);
#undef SETIFVAL
#undef CHANGED

	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		uid_t uid =
		    (vap->va_uid != (uid_t)VNOVAL) ? vap->va_uid : attr->va_uid;
		gid_t gid =
		    (vap->va_gid != (gid_t)VNOVAL) ? vap->va_gid : attr->va_gid;
		error = kauth_authorize_vnode(cred,
		    KAUTH_VNODE_CHANGE_OWNERSHIP, vp, NULL,
		    genfs_can_chown(vp, cred, attr->va_uid, attr->va_gid, uid,
		    gid));
		if (error)
			return error;
		attr->va_uid = uid;
		attr->va_gid = gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t mode = vap->va_mode;
		error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_SECURITY,
		    vp, NULL, genfs_can_chmod(vp, cred, attr->va_uid,
		    attr->va_gid, mode));
		if (error)
			return error;
		attr->va_mode = mode;
	}

	if (vp->v_type == VREG &&
	    vap->va_size != VSIZENOTSET &&
	    vap->va_size != rn->rn_dlen &&
	    (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0) {
		void *newdata;
		size_t copylen, newlen;

		newlen = vap->va_size;
		newdata = rump_hypermalloc(newlen, 0, false, "rumpfs");
		if (newdata == NULL)
			return ENOSPC;

		copylen = MIN(rn->rn_dlen, newlen);
		if (copylen > 0)
			memcpy(newdata, rn->rn_data, copylen);
		memset((char *)newdata + copylen, 0, newlen - copylen);

		if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
			rump_hyperfree(rn->rn_data, rn->rn_dlen);
		} else {
			rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
		}

		rn->rn_data = newdata;
		rn->rn_dlen = newlen;
		uvm_vnp_setsize(vp, newlen);
	}
	return 0;
}

static int
rump_vop_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv = 0;

	rn = makeprivate(VDIR, va->va_mode & ALLPERMS, NODEV, DEV_BSIZE, false);
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rn->rn_parent = rnd;
	rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
	if (rv) {
		freeprivate(rn);
		return rv;
	}

	makedir(rnd, cnp, rn);

	return rv;
}

static int
rump_vop_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct rumpfs_node *rn = vp->v_data;
	struct rumpfs_dent *rd;
	int rv = 0;

	LIST_FOREACH(rd, &rn->rn_dir, rd_entries) {
		if (rd->rd_node != RUMPFS_WHITEOUT) {
			rv = ENOTEMPTY;
			goto out;
		}
	}
	while ((rd = LIST_FIRST(&rn->rn_dir)) != NULL) {
		KASSERT(rd->rd_node == RUMPFS_WHITEOUT);
		LIST_REMOVE(rd, rd_entries);
		kmem_free(rd->rd_name, rd->rd_namelen+1);
		kmem_free(rd, sizeof(*rd));
	}

	freedir(rnd, cnp);
	rn->rn_flags |= RUMPNODE_CANRECLAIM;
	rn->rn_parent = NULL;
	rn->rn_va.va_nlink = 0;

 out:
	vput(vp);
	return rv;
}

static int
rump_vop_remove(void *v)
{
	struct vop_remove_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct rumpfs_node *rn = vp->v_data;
	int rv = 0;

	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
		return EOPNOTSUPP;

	freedir(rnd, cnp);
	rn->rn_flags |= RUMPNODE_CANRECLAIM;
	rn->rn_va.va_nlink = 0;

	vput(vp);
	return rv;
}

static int
rump_vop_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv;

	rn = makeprivate(va->va_type, va->va_mode & ALLPERMS, va->va_rdev,
	    DEV_BSIZE, false);
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
	if (rv) {
		freeprivate(rn);
		return rv;
	}

	makedir(rnd, cnp, rn);

	return rv;
}

static int
rump_vop_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	off_t newsize;
	int rv;

	newsize = va->va_type == VSOCK ? DEV_BSIZE : 0;
	rn = makeprivate(va->va_type, va->va_mode & ALLPERMS, NODEV,
	    newsize, false);
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
	if (rv) {
		freeprivate(rn);
		return rv;
	}

	makedir(rnd, cnp, rn);

	return rv;
}

static int
rump_vop_symlink(void *v)
{
	struct vop_symlink_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	const char *target = ap->a_target;
	size_t linklen;
	int rv;

	linklen = strlen(target);
	KASSERT(linklen < MAXPATHLEN);
	rn = makeprivate(VLNK, va->va_mode & ALLPERMS, NODEV, linklen, false);
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
	if (rv) {
		freeprivate(rn);
		return rv;
	}

	makedir(rnd, cnp, rn);

	KASSERT(linklen < MAXPATHLEN);
	rn->rn_linktarg = PNBUF_GET();
	rn->rn_linklen = linklen;
	strcpy(rn->rn_linktarg, target);

	return rv;
}

static int
rump_vop_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;

	return uiomove(rn->rn_linktarg, rn->rn_linklen, uio);
}

static int
rump_vop_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct componentname *cnp = ap->a_cnp;
	int flags = ap->a_flags;

	switch (flags) {
	case LOOKUP:
		break;
	case CREATE:
		makedir(rnd, cnp, RUMPFS_WHITEOUT);
		break;
	case DELETE:
		cnp->cn_flags &= ~DOWHITEOUT; /* cargo culting never fails ? */
		freedir(rnd, cnp);
		break;
	default:
		panic("unknown whiteout op %d", flags);
	}

	return 0;
}

static int
rump_vop_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int mode = ap->a_mode;
	int error = EINVAL;

	if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0)
		return 0;

	if (mode & FREAD) {
		if (rn->rn_readfd != -1)
			return 0;
		error = rumpuser_open(rn->rn_hostpath,
		    RUMPUSER_OPEN_RDONLY, &rn->rn_readfd);
	}

	if (mode & FWRITE) {
		if (rn->rn_writefd != -1)
			return 0;
		error = rumpuser_open(rn->rn_hostpath,
		    RUMPUSER_OPEN_WRONLY, &rn->rn_writefd);
	}

	return error;
}

/* simple readdir. even omits dotstuff and periods */
static int
rump_vop_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct rumpfs_node *rnd = vp->v_data;
	struct rumpfs_dent *rdent;
	struct dirent *dentp = NULL;
	unsigned i;
	int rv = 0;

	/* seek to current entry */
	for (i = 0, rdent = LIST_FIRST(&rnd->rn_dir);
	    (i < uio->uio_offset) && rdent;
	    i++, rdent = LIST_NEXT(rdent, rd_entries))
		continue;
	if (!rdent)
		goto out;

	/* copy entries */
	dentp = kmem_alloc(sizeof(*dentp), KM_SLEEP);
	for (; rdent && uio->uio_resid > 0;
	    rdent = LIST_NEXT(rdent, rd_entries), i++) {
		strlcpy(dentp->d_name, rdent->rd_name, sizeof(dentp->d_name));
		dentp->d_namlen = strlen(dentp->d_name);
		dentp->d_reclen = _DIRENT_RECLEN(dentp, dentp->d_namlen);

		if (__predict_false(RDENT_ISWHITEOUT(rdent))) {
			dentp->d_fileno = INO_WHITEOUT;
			dentp->d_type = DT_WHT;
		} else {
			dentp->d_fileno = rdent->rd_node->rn_va.va_fileid;
			dentp->d_type = vtype2dt(rdent->rd_node->rn_va.va_type);
		}

		if (uio->uio_resid < dentp->d_reclen) {
			i--;
			break;
		}

		rv = uiomove(dentp, dentp->d_reclen, uio);
		if (rv) {
			i--;
			break;
		}
	}
	kmem_free(dentp, sizeof(*dentp));
	dentp = NULL;

 out:
	KASSERT(dentp == NULL);
	if (ap->a_cookies) {
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	}
	if (rdent)
		*ap->a_eofflag = 0;
	else
		*ap->a_eofflag = 1;
	uio->uio_offset = i;

	return rv;
}

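/*
 * Host I/O for etfs nodes: data is staged through a temporary kernel
 * buffer and moved with rumpuser_iovread()/rumpuser_iovwrite(),
 * offset by rn_offset within the host file.
 */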
static int
etread(struct rumpfs_node *rn, struct uio *uio)
{
	struct rumpuser_iovec iov;
	uint8_t *buf;
	size_t bufsize, n;
	int error = 0;

	bufsize = uio->uio_resid;
	if (bufsize == 0)
		return 0;
	buf = kmem_alloc(bufsize, KM_SLEEP);

	iov.iov_base = buf;
	iov.iov_len = bufsize;
	if ((error = rumpuser_iovread(rn->rn_readfd, &iov, 1,
	    uio->uio_offset + rn->rn_offset, &n)) == 0) {
		KASSERT(n <= bufsize);
		error = uiomove(buf, n, uio);
	}

	kmem_free(buf, bufsize);
	return error;
}

static int
rump_vop_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	const int advice = IO_ADV_DECODE(ap->a_ioflag);
	off_t chunk;
	int error = 0;
	struct timespec ts;

	if (vp->v_type == VDIR)
		return EISDIR;

	/* et op? */
	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
		return etread(rn, uio);

	getnanotime(&ts);
	(void)rumpfs_update(RUMPFS_ACCESS, vp, &ts, &ts, &ts);

	/* otherwise, it's off to ubc with us */
	while (uio->uio_resid > 0) {
		chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
		if (chunk == 0)
			break;
		error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
		if (error)
			break;
	}

	return error;
}

static int
etwrite(struct rumpfs_node *rn, struct uio *uio)
{
	struct rumpuser_iovec iov;
	uint8_t *buf;
	size_t bufsize, n;
	int error = 0;

	bufsize = uio->uio_resid;
	if (bufsize == 0)
		return 0;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;

	KASSERT(uio->uio_resid == 0);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	if ((error = rumpuser_iovwrite(rn->rn_writefd, &iov, 1,
	    (uio->uio_offset-bufsize) + rn->rn_offset, &n)) == 0) {
		KASSERT(n <= bufsize);
		uio->uio_resid = bufsize - n;
	}

 out:
	kmem_free(buf, bufsize);
	return error;
}

static int
rump_vop_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	const int advice = IO_ADV_DECODE(ap->a_ioflag);
	void *olddata;
	size_t oldlen, newlen;
	off_t chunk;
	int error = 0;
	bool allocd = false;
	struct timespec ts;

	getnanotime(&ts);
	(void)rumpfs_update(RUMPFS_MODIFY, vp, &ts, &ts, &ts);

	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = vp->v_size;

	/* consult et? */
	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
		return etwrite(rn, uio);

	/*
	 * Otherwise, it's a case of ubcmove.
	 */

	/*
	 * First, make sure we have enough storage.
	 *
	 * No, you don't need to tell me it's not very efficient.
	 * No, it doesn't really support sparse files, just fakes it.
	 */
	newlen = uio->uio_offset + uio->uio_resid;
	oldlen = 0; /* XXXgcc */
	olddata = NULL;
	if (rn->rn_dlen < newlen) {
		oldlen = rn->rn_dlen;
		olddata = rn->rn_data;

		rn->rn_data = rump_hypermalloc(newlen, 0, false, "rumpfs");
		if (rn->rn_data == NULL)
			return ENOSPC;
		rn->rn_dlen = newlen;
		memset(rn->rn_data, 0, newlen);
		if (oldlen > 0)
			memcpy(rn->rn_data, olddata, oldlen);
		allocd = true;
		uvm_vnp_setsize(vp, newlen);
	}

	/* ok, we have enough stooorage. write */
	while (uio->uio_resid > 0) {
		chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
		if (chunk == 0)
			break;
		error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
		    UBC_WRITE | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
		if (error)
			break;
	}

	if (allocd) {
		if (error) {
			rump_hyperfree(rn->rn_data, newlen);
			rn->rn_data = olddata;
			rn->rn_dlen = oldlen;
			uvm_vnp_setsize(vp, oldlen);
		} else {
			if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
				rump_hyperfree(olddata, oldlen);
			} else {
				rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
			}
		}
	}

	return error;
}

static int
rump_vop_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	/* 1:1 mapping */
	if (ap->a_vpp)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp)
		*ap->a_runp = 16;

	return 0;
}

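/*
 * Strategy for in-memory regular files: serve the buffer directly from
 * rn_data.  Short reads are zero-filled up to b_bcount.
 */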
static int
rump_vop_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct buf *bp = ap->a_bp;
	off_t copylen, copyoff;
	int error;

	if (vp->v_type != VREG || rn->rn_flags & RUMPNODE_ET_PHONE_HOST) {
		error = EINVAL;
		goto out;
	}

	copyoff = bp->b_blkno << DEV_BSHIFT;
	copylen = MIN(rn->rn_dlen - copyoff, bp->b_bcount);
	if (BUF_ISWRITE(bp)) {
		memcpy((uint8_t *)rn->rn_data + copyoff, bp->b_data, copylen);
	} else {
		memset((uint8_t*)bp->b_data + copylen, 0, bp->b_bcount-copylen);
		memcpy(bp->b_data, (uint8_t *)rn->rn_data + copyoff, copylen);
	}
	bp->b_resid = 0;
	error = 0;

 out:
	bp->b_error = error;
	biodone(bp);
	return 0;
}

static int
rump_vop_pathconf(void *v)
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	}; */ *ap = v;
	int name = ap->a_name;
	register_t *retval = ap->a_retval;

	switch (name) {
	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		return 0;
	case _PC_NAME_MAX:
		*retval = RUMPFS_MAXNAMLEN;
		return 0;
	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		return 0;
	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		return 0;
	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		return 0;
	case _PC_NO_TRUNC:
		*retval = 1;
		return 0;
	case _PC_SYNC_IO:
		*retval = 1;
		return 0;
	case _PC_FILESIZEBITS:
		*retval = 43; /* this one goes to 11 */
		return 0;
	case _PC_SYMLINK_MAX:
		*retval = MAXPATHLEN;
		return 0;
	case _PC_2_SYMLINKS:
		*retval = 1;
		return 0;
	default:
		return EINVAL;
	}
}

static int
rump_vop_success(void *v)
{

	return 0;
}

static int
rump_vop_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST && vp->v_type == VREG) {
		if (rn->rn_readfd != -1) {
			rumpuser_close(rn->rn_readfd);
			rn->rn_readfd = -1;
		}
		if (rn->rn_writefd != -1) {
			rumpuser_close(rn->rn_writefd);
			rn->rn_writefd = -1;
		}
	}
	*ap->a_recycle = (rn->rn_flags & RUMPNODE_CANRECLAIM) ? true : false;

	return 0;
}

static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	VOP_UNLOCK(vp);

	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	genfs_node_destroy(vp);
	vp->v_data = NULL;

	if (rn->rn_flags & RUMPNODE_CANRECLAIM) {
		if (vp->v_type == VREG
		    && (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0
		    && rn->rn_data) {
			if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
				rump_hyperfree(rn->rn_data, rn->rn_dlen);
			} else {
				rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
			}
			rn->rn_data = NULL;
		}

		if (vp->v_type == VLNK)
			PNBUF_PUT(rn->rn_linktarg);
		if (rn->rn_hostpath)
			free(rn->rn_hostpath, M_TEMP);
		freeprivate(rn);
	}

	return 0;
}

static int
rump_vop_spec(void *v)
{
	struct vop_generic_args *ap = v;
	int (**opvec)(void *);

	switch (ap->a_desc->vdesc_offset) {
	case VOP_ACCESS_DESCOFFSET:
	case VOP_GETATTR_DESCOFFSET:
	case VOP_SETATTR_DESCOFFSET:
	case VOP_LOCK_DESCOFFSET:
	case VOP_UNLOCK_DESCOFFSET:
	case VOP_ISLOCKED_DESCOFFSET:
	case VOP_INACTIVE_DESCOFFSET:
	case VOP_RECLAIM_DESCOFFSET:
		opvec = rump_vnodeop_p;
		break;
	default:
		opvec = spec_vnodeop_p;
		break;
	}

	return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
}

static int
rump_vop_advlock(void *v)
{
	struct vop_advlock_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		void *a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	return lf_advlock(ap, &rn->rn_lockf, vp->v_size);
}

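/*
 * RUMPFS_FCNTL_EXTSTORAGE_ADD: a local rump client hands us externally
 * owned memory to use as the file contents (regular, non-etfs files
 * only) through fcntl(2) on a writable descriptor.  The sketch below is
 * illustrative only, assuming the rump_sys_fcntl() syscall wrapper:
 *
 *	struct rumpfs_extstorage rfse = {
 *		.rfse_flags = 0,
 *		.rfse_data = buf,
 *		.rfse_dlen = buflen,
 *	};
 *	rump_sys_fcntl(fd, RUMPFS_FCNTL_EXTSTORAGE_ADD, &rfse);
 */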
static int
rump_vop_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		void *a_data;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	u_int cmd = ap->a_command;
	int fflag = ap->a_fflag;
	struct rumpfs_extstorage *rfse = ap->a_data;
	int error = 0;

	/* none of the current rumpfs fcntlops are defined for remotes */
	if (!RUMP_LOCALPROC_P(p))
		return EINVAL;

	switch (cmd) {
	case RUMPFS_FCNTL_EXTSTORAGE_ADD:
		break;
	default:
		return EINVAL;
	}

	if ((fflag & FWRITE) == 0)
		return EBADF;

	if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST))
		return EINVAL;

	if (rfse->rfse_flags != 0)
		return EINVAL;

	/*
	 * Ok, we are good to go. Process.
	 */

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	KASSERT(cmd == RUMPFS_FCNTL_EXTSTORAGE_ADD);
	if (rn->rn_data && (rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
		rump_hyperfree(rn->rn_data, rn->rn_dlen);
	}

	rn->rn_data = rfse->rfse_data;
	rn->rn_dlen = rfse->rfse_dlen;
	uvm_vnp_setsize(vp, rn->rn_dlen);
	rn->rn_flags |= RUMPNODE_EXTSTORAGE;

	VOP_UNLOCK(vp);

	return error;
}

/*
 * Begin vfs-level stuff
 */

VFS_PROTOS(rumpfs);
struct vfsops rumpfs_vfsops = {
	.vfs_name = MOUNT_RUMPFS,
	.vfs_min_mount_data = 0,
	.vfs_mount = rumpfs_mount,
	.vfs_start = (void *)nullop,
	.vfs_unmount = rumpfs_unmount,
	.vfs_root = rumpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = genfs_statvfs,
	.vfs_sync = (void *)nullop,
	.vfs_vget = rumpfs_vget,
	.vfs_loadvnode = rumpfs_loadvnode,
	.vfs_fhtovp = (void *)eopnotsupp,
	.vfs_vptofh = (void *)eopnotsupp,
	.vfs_init = rumpfs_init,
	.vfs_reinit = NULL,
	.vfs_done = rumpfs_done,
	.vfs_mountroot = rumpfs_mountroot,
	.vfs_snapshot = (void *)eopnotsupp,
	.vfs_extattrctl = (void *)eopnotsupp,
	.vfs_suspendctl = genfs_suspendctl,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_opv_descs = rump_opv_descs,
	/* vfs_refcount */
	/* vfs_list */
};

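/*
 * Mount glue: rumpfs_mountfs() creates the root directory node and
 * wires it to the mount point; rumpfs_mount() is the VFS entry point
 * and additionally handles MNT_GETARGS and read-only updates.
 */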
static int
rumpfs_mountfs(struct mount *mp)
{
	struct rumpfs_mount *rfsmp;
	struct rumpfs_node *rn;
	int error;

	rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);

	rn = makeprivate(VDIR, RUMPFS_DEFAULTMODE, NODEV, DEV_BSIZE, false);
	rn->rn_parent = rn;
	if ((error = vcache_get(mp, &rn, sizeof(rn), &rfsmp->rfsmp_rvp))
	    != 0) {
		freeprivate(rn);
		kmem_free(rfsmp, sizeof(*rfsmp));
		return error;
	}

	rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;

	mp->mnt_data = rfsmp;
	mp->mnt_stat.f_namemax = RUMPFS_MAXNAMLEN;
	mp->mnt_stat.f_iosize = 512;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
	mp->mnt_fs_bshift = DEV_BSHIFT;
	vfs_getnewfsid(mp);

	return 0;
}

int
rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
{
	int error, flags;

	if (mp->mnt_flag & MNT_GETARGS) {
		return 0;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		if ((mp->mnt_iflag & IMNT_WANTRDONLY)) {
			/* Changing from read/write to read-only. */
			flags = WRITECLOSE;
			if ((mp->mnt_flag & MNT_FORCE))
				flags |= FORCECLOSE;
			error = vflush(mp, NULL, flags);
			if (error)
				return error;
		}
		return 0;
	}

	error = set_statvfs_info(mntpath, UIO_USERSPACE, "rumpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error)
		return error;

	return rumpfs_mountfs(mp);
}

int
rumpfs_unmount(struct mount *mp, int mntflags)
{
	struct rumpfs_mount *rfsmp = mp->mnt_data;
	int flags = 0, error;

	if (panicstr || mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (vrefcnt(rfsmp->rfsmp_rvp) > 1 && (flags & FORCECLOSE) == 0)
		return EBUSY;

	if ((error = vflush(mp, rfsmp->rfsmp_rvp, flags)) != 0)
		return error;
	vgone(rfsmp->rfsmp_rvp);

	kmem_free(rfsmp, sizeof(*rfsmp));

	return 0;
}

int
rumpfs_root(struct mount *mp, int lktype, struct vnode **vpp)
{
	struct rumpfs_mount *rfsmp = mp->mnt_data;

	vref(rfsmp->rfsmp_rvp);
	vn_lock(rfsmp->rfsmp_rvp, lktype | LK_RETRY);
	*vpp = rfsmp->rfsmp_rvp;
	return 0;
}

int
rumpfs_vget(struct mount *mp, ino_t ino, int lktype, struct vnode **vpp)
{

	return EOPNOTSUPP;
}

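/*
 * vcache loadvnode: the cache key is the rumpfs_node pointer itself,
 * so vcache_get(mp, &rn, sizeof(rn), &vp) maps a node to its vnode.
 */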
int
rumpfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct rumpfs_node *rn;
	struct vattr *va;

	KASSERT(!mutex_owned(&reclock));

	KASSERT(key_len == sizeof(rn));
	memcpy(&rn, key, key_len);

	va = &rn->rn_va;

	vp->v_tag = VT_RUMP;
	vp->v_type = va->va_type;
	switch (vp->v_type) {
	case VCHR:
	case VBLK:
		vp->v_op = rump_specop_p;
		spec_node_init(vp, va->va_rdev);
		break;
	default:
		vp->v_op = rump_vnodeop_p;
		break;
	}
	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_data = rn;

	genfs_node_init(vp, &rumpfs_genfsops);
	mutex_enter(&reclock);
	rn->rn_vp = vp;
	mutex_exit(&reclock);

	*new_key = &vp->v_data;

	return 0;
}

void
rumpfs_init()
{
	extern rump_etfs_register_withsize_fn rump__etfs_register;
	extern rump_etfs_remove_fn rump__etfs_remove;
	extern struct rump_boot_etfs *ebstart;
	struct rump_boot_etfs *eb;

	CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);

	mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);

	rump__etfs_register = etfsregister;
	rump__etfs_remove = etfsremove;

	for (eb = ebstart; eb; eb = eb->_eb_next) {
		eb->eb_status = etfsregister(eb->eb_key, eb->eb_hostpath,
		    eb->eb_type, eb->eb_begin, eb->eb_size);
	}
}

void
rumpfs_done()
{

	mutex_destroy(&reclock);
	mutex_destroy(&etfs_lock);
}

int
rumpfs_mountroot()
{
	struct mount *mp;
	int error;

	if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
		vrele(rootvp);
		return error;
	}

	if ((error = rumpfs_mountfs(mp)) != 0)
		panic("mounting rootfs failed: %d", error);

	mountlist_append(mp);

	error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error)
		panic("set_statvfs_info failed for rootfs: %d", error);

	mp->mnt_flag &= ~MNT_RDONLY;
	vfs_unbusy(mp);

	return 0;
}