rumpfs.c revision 1.106 1 /* $NetBSD: rumpfs.c,v 1.106 2012/01/31 19:00:03 njoly Exp $ */
2
3 /*
4 * Copyright (c) 2009, 2010, 2011 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.106 2012/01/31 19:00:03 njoly Exp $");
30
31 #include <sys/param.h>
32 #include <sys/atomic.h>
33 #include <sys/buf.h>
34 #include <sys/dirent.h>
35 #include <sys/errno.h>
36 #include <sys/filedesc.h>
37 #include <sys/fcntl.h>
38 #include <sys/kauth.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/lock.h>
44 #include <sys/lockf.h>
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 #include <sys/syscallargs.h>
48 #include <sys/vnode.h>
49 #include <sys/unistd.h>
50
51 #include <miscfs/fifofs/fifo.h>
52 #include <miscfs/specfs/specdev.h>
53 #include <miscfs/genfs/genfs.h>
54 #include <miscfs/genfs/genfs_node.h>
55
56 #include <uvm/uvm_extern.h>
57
58 #include <rump/rumpuser.h>
59
60 #include "rump_private.h"
61 #include "rump_vfs_private.h"
62
/*
 * Forward declarations for the rumpfs vnode operations implemented
 * below.  All follow the standard vnodeop calling convention: a single
 * opaque pointer to the per-operation vop_*_args structure.
 */
static int rump_vop_lookup(void *);
static int rump_vop_getattr(void *);
static int rump_vop_setattr(void *);
static int rump_vop_mkdir(void *);
static int rump_vop_rmdir(void *);
static int rump_vop_remove(void *);
static int rump_vop_mknod(void *);
static int rump_vop_create(void *);
static int rump_vop_inactive(void *);
static int rump_vop_reclaim(void *);
static int rump_vop_success(void *);
static int rump_vop_readdir(void *);
static int rump_vop_spec(void *);
static int rump_vop_read(void *);
static int rump_vop_write(void *);
static int rump_vop_open(void *);
static int rump_vop_symlink(void *);
static int rump_vop_readlink(void *);
static int rump_vop_whiteout(void *);
static int rump_vop_pathconf(void *);
static int rump_vop_bmap(void *);
static int rump_vop_strategy(void *);
static int rump_vop_advlock(void *);
static int rump_vop_access(void *);
87
/*
 * Minimal fifo op vector: every operation falls through to
 * vn_default_error.  NOTE(review): this defines the fifo_vnodeop
 * symbols locally; presumably rumpfs only needs them to exist for
 * linkage -- confirm against the rump vfs build setup.
 */
int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };
95
/*
 * The main rumpfs vnode operations vector.  Ops not implemented here
 * are serviced by genfs helpers (close/seek/paging/locking), and
 * unimplemented ops fall through to vn_default_error.
 */
int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_setattr_desc, rump_vop_setattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_rmdir_desc, rump_vop_rmdir },
	{ &vop_remove_desc, rump_vop_remove },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_symlink_desc, rump_vop_symlink },
	{ &vop_readlink_desc, rump_vop_readlink },
	{ &vop_access_desc, rump_vop_access },
	{ &vop_readdir_desc, rump_vop_readdir },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_close_desc, genfs_nullop },
	{ &vop_seek_desc, genfs_seek },
	{ &vop_getpages_desc, genfs_getpages },
	{ &vop_putpages_desc, genfs_putpages },
	{ &vop_whiteout_desc, rump_vop_whiteout },
	{ &vop_fsync_desc, rump_vop_success },	/* nothing to flush */
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_islocked_desc, genfs_islocked },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ &vop_link_desc, genfs_eopnotsupp },	/* hard links unsupported */
	{ &vop_pathconf_desc, rump_vop_pathconf },
	{ &vop_bmap_desc, rump_vop_bmap },
	{ &vop_strategy_desc, rump_vop_strategy },
	{ &vop_advlock_desc, rump_vop_advlock },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };
134
/*
 * Op vector for device (VCHR/VBLK) nodes: everything is routed to
 * rump_vop_spec, which dispatches into specfs.
 */
int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };

/* all op vectors initialized for this file system, NULL-terminated */
const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};
148
/*
 * In-memory directory entry.  A whiteout is represented by storing
 * the magic pointer RUMPFS_WHITEOUT instead of a real node pointer.
 */
#define RUMPFS_WHITEOUT ((void *)-1)
#define RDENT_ISWHITEOUT(rdp) (rdp->rd_node == RUMPFS_WHITEOUT)
struct rumpfs_dent {
	char *rd_name;			/* NUL-terminated, kmem_alloc'd */
	int rd_namelen;			/* strlen(rd_name) */
	struct rumpfs_node *rd_node;	/* target node or RUMPFS_WHITEOUT */

	LIST_ENTRY(rumpfs_dent) rd_entries;
};
158
/* genfs hooks used by the paging code; only size/write are needed */
struct genfs_ops rumpfs_genfsops = {
	.gop_size = genfs_size,
	.gop_write = genfs_gop_write,

	/* optional */
	.gop_alloc = NULL,
	.gop_markupdate = NULL,
};
167
/*
 * Per-vnode private data.  The rn_u union is discriminated by the
 * vnode type and, for VREG, by whether the node phones the host
 * (etfs-backed: reg) or lives purely in memory (reg_noet).
 */
struct rumpfs_node {
	struct genfs_node rn_gn;
	struct vattr rn_va;	/* cached attributes; va_fileid is inode # */
	struct vnode *rn_vp;	/* back-pointer, protected by reclock */
	char *rn_hostpath;	/* host path for etfs nodes, else NULL */
	int rn_flags;		/* RUMPNODE_* below */
	struct lockf *rn_lockf;	/* advisory lock state (VOP_ADVLOCK) */

	union {
		struct { /* VREG, etfs-backed */
			int readfd;		/* host fds, -1 = not open */
			int writefd;
			uint64_t offset;	/* window start in host file */
		} reg;
		struct { /* VREG, in-memory */
			void *data;
			size_t dlen;
		} reg_noet;
		struct { /* VDIR */
			LIST_HEAD(, rumpfs_dent) dents;
			struct rumpfs_node *parent;	/* NULL once rmdir'd */
			int flags;
		} dir;
		struct { /* VLNK */
			char *target;	/* PNBUF with link target */
			size_t len;
		} link;
	} rn_u;
};
/* convenience accessors for the union members */
#define rn_readfd rn_u.reg.readfd
#define rn_writefd rn_u.reg.writefd
#define rn_offset rn_u.reg.offset
#define rn_data rn_u.reg_noet.data
#define rn_dlen rn_u.reg_noet.dlen
#define rn_dir rn_u.dir.dents
#define rn_parent rn_u.dir.parent
#define rn_linktarg rn_u.link.target
#define rn_linklen rn_u.link.len

#define RUMPNODE_CANRECLAIM	0x01	/* ok to free node at reclaim */
#define RUMPNODE_DIR_ET		0x02	/* dir is etfs-backed */
#define RUMPNODE_DIR_ETSUBS	0x04	/* subdirs inherit etfs backing */
#define RUMPNODE_ET_PHONE_HOST	0x10	/* I/O goes to the host file */

/* per-mount data: just the root vnode */
struct rumpfs_mount {
	struct vnode *rfsmp_rvp;
};
215
#define INO_WHITEOUT 1
/*
 * Inode number allocator: 1 is reserved for whiteout entries, the
 * first real node gets 3 (allocator is pre-incremented).
 * NOTE(review): declared int but bumped via atomic_inc_uint_nv() in
 * makeprivate() -- works where int/unsigned have the same size, but
 * an unsigned type would match the atomic op; confirm.
 */
static int lastino = 2;
/* serializes access to the rn_vp back-pointers in rumpfs nodes */
static kmutex_t reclock;

static void freedir(struct rumpfs_node *, struct componentname *);
static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t, bool);
222
/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];	/* key with leading '/' stripped */
	size_t et_keylen;		/* strlen(et_key) */
	bool et_prefixkey;		/* dirs match by prefix, see etfs_find */
	bool et_removing;		/* removal claimed, reject racers */
	devminor_t et_blkmin;		/* rumpblk minor, -1 for non-devices */

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;	/* backing rumpfs node */
};
/* etfs_lock protects etfs_list and the et_removing flags */
static kmutex_t etfs_lock;
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);
244
245 static enum vtype
246 ettype_to_vtype(enum rump_etfs_type et)
247 {
248 enum vtype vt;
249
250 switch (et) {
251 case RUMP_ETFS_REG:
252 vt = VREG;
253 break;
254 case RUMP_ETFS_BLK:
255 vt = VBLK;
256 break;
257 case RUMP_ETFS_CHR:
258 vt = VCHR;
259 break;
260 case RUMP_ETFS_DIR:
261 vt = VDIR;
262 break;
263 case RUMP_ETFS_DIR_SUBDIRS:
264 vt = VDIR;
265 break;
266 default:
267 panic("invalid et type: %d", et);
268 }
269
270 return vt;
271 }
272
273 static enum vtype
274 hft_to_vtype(int hft)
275 {
276 enum vtype vt;
277
278 switch (hft) {
279 case RUMPUSER_FT_OTHER:
280 vt = VNON;
281 break;
282 case RUMPUSER_FT_DIR:
283 vt = VDIR;
284 break;
285 case RUMPUSER_FT_REG:
286 vt = VREG;
287 break;
288 case RUMPUSER_FT_BLK:
289 vt = VBLK;
290 break;
291 case RUMPUSER_FT_CHR:
292 vt = VCHR;
293 break;
294 default:
295 vt = VNON;
296 break;
297 }
298
299 return vt;
300 }
301
302 static bool
303 etfs_find(const char *key, struct etfs **etp, bool forceprefix)
304 {
305 struct etfs *et;
306 size_t keylen = strlen(key);
307
308 KASSERT(mutex_owned(&etfs_lock));
309
310 LIST_FOREACH(et, &etfs_list, et_entries) {
311 if ((keylen == et->et_keylen || et->et_prefixkey || forceprefix)
312 && strncmp(key, et->et_key, et->et_keylen) == 0) {
313 if (etp)
314 *etp = et;
315 return true;
316 }
317 }
318
319 return false;
320 }
321
#define REGDIR(ftype) \
    ((ftype) == RUMP_ETFS_DIR || (ftype) == RUMP_ETFS_DIR_SUBDIRS)
/*
 * Register an etfs mapping: make /key refer to the window
 * [begin, begin+size) of the host file hostpath.  Block/char types
 * additionally register with rumpblk to obtain a real dev_t.
 * Returns 0 or errno; EEXIST if the key is already registered.
 */
static int
doregister(const char *key, const char *hostpath,
	enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
{
	char buf[9];
	struct etfs *et;
	struct rumpfs_node *rn;
	uint64_t fsize;
	dev_t rdev = NODEV;
	devminor_t dmin = -1;
	int hft, error;

	/* key must be absolute; internally we store it without slashes */
	if (key[0] != '/') {
		return EINVAL;
	}
	while (key[0] == '/') {
		key++;
	}

	if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
		return error;

	/* etfs directory requires a directory on the host */
	if (REGDIR(ftype)) {
		if (hft != RUMPUSER_FT_DIR)
			return ENOTDIR;
		if (begin != 0)
			return EISDIR;
		if (size != RUMP_ETFS_SIZE_ENDOFF)
			return EISDIR;
		size = fsize;
	} else {
		/* validate/normalize the requested window */
		if (begin > fsize)
			return EINVAL;
		if (size == RUMP_ETFS_SIZE_ENDOFF)
			size = fsize - begin;
		if (begin + size > fsize)
			return EINVAL;
	}

	if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
		error = rumpblk_register(hostpath, &dmin, begin, size);
		if (error != 0) {
			return error;
		}
		rdev = makedev(RUMPBLK_DEVMAJOR, dmin);
	}

	et = kmem_alloc(sizeof(*et), KM_SLEEP);
	/*
	 * NOTE(review): no length check against sizeof(et_key)
	 * (MAXPATHLEN) before this strcpy -- presumably callers pass
	 * sane keys; confirm or bound the copy.
	 */
	strcpy(et->et_key, key);
	et->et_keylen = strlen(et->et_key);
	et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), rdev, size, true);
	et->et_removing = false;
	et->et_blkmin = dmin;

	rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;

	/* types doing host I/O remember the host path and window start */
	if (ftype == RUMP_ETFS_REG || REGDIR(ftype) || et->et_blkmin != -1) {
		size_t len = strlen(hostpath)+1;

		rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
		memcpy(rn->rn_hostpath, hostpath, len);
		rn->rn_offset = begin;
	}

	if (REGDIR(ftype)) {
		rn->rn_flags |= RUMPNODE_DIR_ET;
		et->et_prefixkey = true;
	} else {
		et->et_prefixkey = false;
	}

	if (ftype == RUMP_ETFS_DIR_SUBDIRS)
		rn->rn_flags |= RUMPNODE_DIR_ETSUBS;

	/* publish, unless someone else registered the key while we built */
	mutex_enter(&etfs_lock);
	if (etfs_find(key, NULL, REGDIR(ftype))) {
		mutex_exit(&etfs_lock);
		/* lost the race: undo everything we created above */
		if (et->et_blkmin != -1)
			rumpblk_deregister(hostpath);
		if (et->et_rn->rn_hostpath != NULL)
			free(et->et_rn->rn_hostpath, M_TEMP);
		kmem_free(et->et_rn, sizeof(*et->et_rn));
		kmem_free(et, sizeof(*et));
		return EEXIST;
	}
	LIST_INSERT_HEAD(&etfs_list, et, et_entries);
	mutex_exit(&etfs_lock);

	if (ftype == RUMP_ETFS_BLK) {
		format_bytes(buf, sizeof(buf), size);
		aprint_verbose("/%s: hostpath %s (%s)\n", key, hostpath, buf);
	}

	return 0;
}
#undef REGDIR
421
422 int
423 rump_etfs_register(const char *key, const char *hostpath,
424 enum rump_etfs_type ftype)
425 {
426
427 return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
428 }
429
430 int
431 rump_etfs_register_withsize(const char *key, const char *hostpath,
432 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
433 {
434
435 return doregister(key, hostpath, ftype, begin, size);
436 }
437
/* remove etfs mapping. caller's responsibility to make sure it's not in use */
int
rump_etfs_remove(const char *key)
{
	struct etfs *et;
	size_t keylen;
	int rv;

	/* same key normalization as doregister() */
	if (key[0] != '/') {
		return EINVAL;
	}
	while (key[0] == '/') {
		key++;
	}

	keylen = strlen(key);

	/*
	 * Find the entry and claim it by setting et_removing; a racing
	 * remover sees the flag and gets ENOENT below.
	 */
	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			if (et->et_removing)
				et = NULL;
			else
				et->et_removing = true;
			break;
		}
	}
	mutex_exit(&etfs_lock);
	if (!et)
		return ENOENT;

	/*
	 * ok, we know what we want to remove and have signalled there
	 * actually are men at work. first, unregister from rumpblk
	 */
	if (et->et_blkmin != -1) {
		rv = rumpblk_deregister(et->et_rn->rn_hostpath);
	} else {
		rv = 0;
	}
	KASSERT(rv == 0);

	/* then do the actual removal */
	mutex_enter(&etfs_lock);
	LIST_REMOVE(et, et_entries);
	mutex_exit(&etfs_lock);

	/* node is unreachable, safe to nuke all device copies */
	if (et->et_blkmin != -1) {
		vdevgone(RUMPBLK_DEVMAJOR, et->et_blkmin, et->et_blkmin, VBLK);
	} else {
		struct vnode *vp;

		/*
		 * Grab the interlock while still holding reclock so the
		 * rn_vp back-pointer cannot change underneath us.
		 */
		mutex_enter(&reclock);
		if ((vp = et->et_rn->rn_vp) != NULL)
			mutex_enter(vp->v_interlock);
		mutex_exit(&reclock);
		if (vp && vget(vp, 0) == 0)
			vgone(vp);
	}

	if (et->et_rn->rn_hostpath != NULL)
		free(et->et_rn->rn_hostpath, M_TEMP);
	kmem_free(et->et_rn, sizeof(*et->et_rn));
	kmem_free(et, sizeof(*et));

	return 0;
}
506
507 /*
508 * rumpfs
509 */
510
/*
 * Allocate and initialize a rumpfs node (without a vnode; that is
 * created lazily by makevnode()).  "et" is true for etfs-backed
 * nodes, whose host fd slots start out as "not open".  Attributes
 * are initialized with current timestamps and a fresh inode number.
 */
static struct rumpfs_node *
makeprivate(enum vtype vt, dev_t rdev, off_t size, bool et)
{
	struct rumpfs_node *rn;
	struct vattr *va;
	struct timespec ts;

	rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);

	switch (vt) {
	case VDIR:
		LIST_INIT(&rn->rn_dir);
		break;
	case VREG:
		/* host descriptors are opened on demand in rump_vop_open() */
		if (et) {
			rn->rn_readfd = -1;
			rn->rn_writefd = -1;
		}
		break;
	default:
		break;
	}

	nanotime(&ts);

	va = &rn->rn_va;
	va->va_type = vt;
	va->va_mode = 0755;
	if (vt == VDIR)
		va->va_nlink = 2;
	else
		va->va_nlink = 1;
	va->va_uid = 0;
	va->va_gid = 0;
	va->va_fsid =
	    va->va_fileid = atomic_inc_uint_nv(&lastino);
	va->va_size = size;
	va->va_blocksize = 512;
	va->va_atime = ts;
	va->va_mtime = ts;
	va->va_ctime = ts;
	va->va_birthtime = ts;
	va->va_gen = 0;
	va->va_flags = 0;
	va->va_rdev = rdev;
	va->va_bytes = 512;
	va->va_filerev = 0;
	va->va_vaflags = 0;

	return rn;
}
562
/*
 * Create a vnode for rn: pick the spec op vector for devices, set up
 * genfs state, and link rn <-> vp under reclock.  Returns the vnode
 * locked exclusively in *vpp, or an error from getnewvnode().
 */
static int
makevnode(struct mount *mp, struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	/* reclock must not be held: getnewvnode() may sleep */
	KASSERT(!mutex_owned(&reclock));

	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}

	rv = getnewvnode(VT_RUMP, mp, vpops, NULL, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	genfs_node_init(vp, &rumpfs_genfsops);
	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	/* publish the back-pointer only once the vnode is fully set up */
	mutex_enter(&reclock);
	rn->rn_vp = vp;
	mutex_exit(&reclock);

	*vpp = vp;

	return 0;
}
601
602
/*
 * Insert a directory entry for rn (which may be RUMPFS_WHITEOUT)
 * named by cnp into directory node rnd.  If the lookup marked the
 * name as a whiteout, the whiteout entry is removed first so the new
 * entry replaces it rather than shadowing it.
 */
static void
makedir(struct rumpfs_node *rnd,
	struct componentname *cnp, struct rumpfs_node *rn)
{
	struct rumpfs_dent *rdent;

	rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
	rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
	rdent->rd_node = rn;
	strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
	rdent->rd_namelen = strlen(rdent->rd_name);

	if ((cnp->cn_flags & ISWHITEOUT) != 0) {
		KASSERT((cnp->cn_flags & DOWHITEOUT) == 0);
		freedir(rnd, cnp);
	}
	LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
}
621
/*
 * Remove the entry named by cnp from directory node rnd.  With
 * DOWHITEOUT set the entry is converted into a whiteout instead of
 * being freed.  The name must exist; a missing name is a caller bug
 * and panics.
 */
static void
freedir(struct rumpfs_node *rnd, struct componentname *cnp)
{
	struct rumpfs_dent *rd = NULL;

	LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
		if (rd->rd_namelen == cnp->cn_namelen &&
		    strncmp(rd->rd_name, cnp->cn_nameptr,
		      cnp->cn_namelen) == 0)
			break;
	}
	if (rd == NULL)
		panic("could not find directory entry: %s", cnp->cn_nameptr);

	if (cnp->cn_flags & DOWHITEOUT) {
		rd->rd_node = RUMPFS_WHITEOUT;
	} else {
		LIST_REMOVE(rd, rd_entries);
		kmem_free(rd->rd_name, rd->rd_namelen+1);
		kmem_free(rd, sizeof(*rd));
	}
}
644
/*
 * Simple lookup for rump file systems.
 *
 * uhm, this is twisted.  C F C C, hope of C C F C looming
 *
 * Resolution order: "." -> etfs keys (root dir only) -> etfs-backed
 * directory contents (host stat) -> in-memory directory list.
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *vp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	struct etfs *et;
	bool dotdot = (cnp->cn_flags & ISDOTDOT) != 0;
	int rv = 0;
	const char *cp;

	*vpp = NULL;

	/* caller needs search permission on the directory */
	rv = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (rv)
		return rv;

	if ((cnp->cn_flags & ISLASTCN)
	    && (dvp->v_mount->mnt_flag & MNT_RDONLY)
	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return EROFS;

	/* check for dot, return directly if the case */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	/* we don't do rename */
	if (!(((cnp->cn_flags & ISLASTCN) == 0) || (cnp->cn_nameiop != RENAME)))
		return EOPNOTSUPP;

	/* check for etfs: keys are resolved only from the root directory */
	if (dvp == rootvnode &&
	    (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)) {
		bool found;
		mutex_enter(&etfs_lock);
		found = etfs_find(cnp->cn_nameptr, &et, false);
		mutex_exit(&etfs_lock);

		if (found) {
			rn = et->et_rn;
			/* an etfs key may span several path components */
			cnp->cn_consume += et->et_keylen - cnp->cn_namelen;
			/*
			 * consume trailing slashes if any and clear
			 * REQUIREDIR if we consumed the full path.
			 */
			cp = &cnp->cn_nameptr[cnp->cn_namelen];
			cp += cnp->cn_consume;
			KASSERT(*cp == '\0' || *cp == '/');
			if (*cp == '\0' && rn->rn_va.va_type != VDIR)
				cnp->cn_flags &= ~REQUIREDIR;
			while (*cp++ == '/')
				cnp->cn_consume++;
			goto getvnode;
		}
	}

	if (rnd->rn_flags & RUMPNODE_DIR_ET) {
		/* etfs-backed directory: resolve the name on the host */
		uint64_t fsize;
		char *newpath;
		size_t newpathlen;
		int hft, error;

		if (dotdot)
			return EOPNOTSUPP;

		newpathlen = strlen(rnd->rn_hostpath) + 1 + cnp->cn_namelen + 1;
		newpath = malloc(newpathlen, M_TEMP, M_WAITOK);

		strlcpy(newpath, rnd->rn_hostpath, newpathlen);
		strlcat(newpath, "/", newpathlen);
		strlcat(newpath, cnp->cn_nameptr, newpathlen);

		if (rumpuser_getfileinfo(newpath, &fsize, &hft, &error)) {
			free(newpath, M_TEMP);
			return error;
		}

		/* allow only dirs and regular files */
		if (hft != RUMPUSER_FT_REG && hft != RUMPUSER_FT_DIR) {
			free(newpath, M_TEMP);
			return ENOENT;
		}

		rn = makeprivate(hft_to_vtype(hft), NODEV, fsize, true);
		rn->rn_flags |= RUMPNODE_CANRECLAIM;
		if (rnd->rn_flags & RUMPNODE_DIR_ETSUBS) {
			rn->rn_flags |= RUMPNODE_DIR_ET | RUMPNODE_DIR_ETSUBS;
			rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
		}
		rn->rn_hostpath = newpath;

		goto getvnode;
	} else {
		if (dotdot) {
			if ((rn = rnd->rn_parent) != NULL)
				goto getvnode;
		} else {
			/* plain in-memory directory: linear name scan */
			LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
				if (rd->rd_namelen == cnp->cn_namelen &&
				    strncmp(rd->rd_name, cnp->cn_nameptr,
				      cnp->cn_namelen) == 0)
					break;
			}
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	/* name not found, but caller wants to create it here */
	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		if (dvp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (rv)
			return rv;
		return EJUSTRETURN;
	}

	if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == DELETE) {
		rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (rv)
			return rv;
	}

	if (RDENT_ISWHITEOUT(rd)) {
		cnp->cn_flags |= ISWHITEOUT;
		if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE)
			return EJUSTRETURN;
		return ENOENT;
	}

	rn = rd->rd_node;

 getvnode:
	KASSERT(rn);
	/* ".." requires unlocking the parent to respect lock order */
	if (dotdot)
		VOP_UNLOCK(dvp);
	mutex_enter(&reclock);
	if ((vp = rn->rn_vp)) {
		mutex_enter(vp->v_interlock);
		mutex_exit(&reclock);
		/*
		 * NOTE(review): on vget() failure this relocks dvp and
		 * retries even when dvp was never unlocked (!dotdot) --
		 * confirm vn_lock recursion semantics for this path.
		 */
		if (vget(vp, LK_EXCLUSIVE)) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
			goto getvnode;
		}
		*vpp = vp;
	} else {
		mutex_exit(&reclock);
		rv = makevnode(dvp->v_mount, rn, vpp);
	}
	if (dotdot)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	return rv;
}
816
817 static int
818 rump_check_possible(struct vnode *vp, struct rumpfs_node *rnode,
819 mode_t mode)
820 {
821
822 if ((mode & VWRITE) == 0)
823 return 0;
824
825 switch (vp->v_type) {
826 case VDIR:
827 case VLNK:
828 case VREG:
829 break;
830 default:
831 /* special file is always writable. */
832 return 0;
833 }
834
835 return vp->v_mount->mnt_flag & MNT_RDONLY ? EROFS : 0;
836 }
837
838 static int
839 rump_check_permitted(struct vnode *vp, struct rumpfs_node *rnode,
840 mode_t mode, kauth_cred_t cred)
841 {
842 struct vattr *attr = &rnode->rn_va;
843
844 return genfs_can_access(vp->v_type, attr->va_mode, attr->va_uid,
845 attr->va_gid, mode, cred);
846 }
847
848 int
849 rump_vop_access(void *v)
850 {
851 struct vop_access_args /* {
852 const struct vnodeop_desc *a_desc;
853 struct vnode *a_vp;
854 int a_mode;
855 kauth_cred_t a_cred;
856 } */ *ap = v;
857 struct vnode *vp = ap->a_vp;
858 struct rumpfs_node *rn = vp->v_data;
859 int error;
860
861 error = rump_check_possible(vp, rn, ap->a_mode);
862 if (error)
863 return error;
864
865 error = rump_check_permitted(vp, rn, ap->a_mode, ap->a_cred);
866
867 return error;
868 }
869
870 static int
871 rump_vop_getattr(void *v)
872 {
873 struct vop_getattr_args /* {
874 struct vnode *a_vp;
875 struct vattr *a_vap;
876 kauth_cred_t a_cred;
877 } */ *ap = v;
878 struct vnode *vp = ap->a_vp;
879 struct rumpfs_node *rn = vp->v_data;
880 struct vattr *vap = ap->a_vap;
881
882 memcpy(vap, &rn->rn_va, sizeof(struct vattr));
883 vap->va_size = vp->v_size;
884 return 0;
885 }
886
887 static int
888 rump_vop_setattr(void *v)
889 {
890 struct vop_getattr_args /* {
891 struct vnode *a_vp;
892 struct vattr *a_vap;
893 kauth_cred_t a_cred;
894 } */ *ap = v;
895 struct vnode *vp = ap->a_vp;
896 struct vattr *vap = ap->a_vap;
897 struct rumpfs_node *rn = vp->v_data;
898 struct vattr *attr = &rn->rn_va;
899 kauth_cred_t cred = ap->a_cred;
900 int error;
901
902 #define SETIFVAL(a,t) if (vap->a != (t)VNOVAL) rn->rn_va.a = vap->a
903 SETIFVAL(va_atime.tv_sec, time_t);
904 SETIFVAL(va_ctime.tv_sec, time_t);
905 SETIFVAL(va_mtime.tv_sec, time_t);
906 SETIFVAL(va_birthtime.tv_sec, time_t);
907 SETIFVAL(va_atime.tv_nsec, long);
908 SETIFVAL(va_ctime.tv_nsec, long);
909 SETIFVAL(va_mtime.tv_nsec, long);
910 SETIFVAL(va_birthtime.tv_nsec, long);
911 SETIFVAL(va_flags, u_long);
912 #undef SETIFVAL
913
914 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (uid_t)VNOVAL) {
915 uid_t uid =
916 (vap->va_uid != (uid_t)VNOVAL) ? vap->va_uid : attr->va_uid;
917 gid_t gid =
918 (vap->va_gid != (gid_t)VNOVAL) ? vap->va_gid : attr->va_gid;
919 error = kauth_authorize_vnode(cred,
920 KAUTH_VNODE_CHANGE_OWNERSHIP, vp, NULL,
921 genfs_can_chown(vp, cred, attr->va_uid, attr->va_gid, uid,
922 gid));
923 if (error)
924 return error;
925 attr->va_uid = uid;
926 attr->va_gid = gid;
927 }
928
929 if (vap->va_mode != (mode_t)VNOVAL) {
930 mode_t mode = vap->va_mode;
931 error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_SECURITY,
932 vp, NULL, genfs_can_chmod(vp, cred, attr->va_uid,
933 attr->va_gid, mode));
934 if (error)
935 return error;
936 attr->va_mode = mode;
937 }
938
939 if (vp->v_type == VREG &&
940 vap->va_size != VSIZENOTSET &&
941 vap->va_size != rn->rn_dlen) {
942 void *newdata;
943 size_t copylen, newlen;
944
945 newlen = vap->va_size;
946 newdata = rump_hypermalloc(newlen, 0, true, "rumpfs");
947
948 copylen = MIN(rn->rn_dlen, newlen);
949 memset(newdata, 0, newlen);
950 memcpy(newdata, rn->rn_data, copylen);
951 rump_hyperfree(rn->rn_data, rn->rn_dlen);
952
953 rn->rn_data = newdata;
954 rn->rn_dlen = newlen;
955 uvm_vnp_setsize(vp, newlen);
956 }
957 return 0;
958 }
959
/*
 * VOP_MKDIR: create a directory node under dvp.  Returns the new
 * vnode locked in *vpp; dvp is released in all cases.
 */
static int
rump_vop_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv = 0;

	rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
	/* created over a whiteout: mark the new dir opaque (union mounts) */
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rn->rn_parent = rnd;
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	vput(dvp);
	return rv;
}
989
/*
 * VOP_RMDIR: remove directory vp from dvp.  Only directories that
 * contain nothing but whiteout entries may be removed; the whiteouts
 * themselves are flushed in the process.
 */
static int
rump_vop_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct rumpfs_node *rn = vp->v_data;
	struct rumpfs_dent *rd;
	int rv = 0;

	/* any real (non-whiteout) entry means the directory is not empty */
	LIST_FOREACH(rd, &rn->rn_dir, rd_entries) {
		if (rd->rd_node != RUMPFS_WHITEOUT) {
			rv = ENOTEMPTY;
			goto out;
		}
	}
	/* flush the remaining whiteout entries */
	while ((rd = LIST_FIRST(&rn->rn_dir)) != NULL) {
		KASSERT(rd->rd_node == RUMPFS_WHITEOUT);
		LIST_REMOVE(rd, rd_entries);
		kmem_free(rd->rd_name, rd->rd_namelen+1);
		kmem_free(rd, sizeof(*rd));
	}

	/* unlink from the parent and allow reclaim to free the node */
	freedir(rnd, cnp);
	rn->rn_flags |= RUMPNODE_CANRECLAIM;
	rn->rn_parent = NULL;

 out:
	vput(dvp);
	vput(vp);

	return rv;
}
1029
1030 static int
1031 rump_vop_remove(void *v)
1032 {
1033 struct vop_rmdir_args /* {
1034 struct vnode *a_dvp;
1035 struct vnode *a_vp;
1036 struct componentname *a_cnp;
1037 }; */ *ap = v;
1038 struct vnode *dvp = ap->a_dvp;
1039 struct vnode *vp = ap->a_vp;
1040 struct componentname *cnp = ap->a_cnp;
1041 struct rumpfs_node *rnd = dvp->v_data;
1042 struct rumpfs_node *rn = vp->v_data;
1043 int rv = 0;
1044
1045 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1046 return EOPNOTSUPP;
1047
1048 if (vp->v_type == VREG) {
1049 rump_hyperfree(rn->rn_data, rn->rn_dlen);
1050 }
1051
1052 freedir(rnd, cnp);
1053 rn->rn_flags |= RUMPNODE_CANRECLAIM;
1054
1055 vput(dvp);
1056 vput(vp);
1057
1058 return rv;
1059 }
1060
/*
 * VOP_MKNOD: create a device (or other special) node under dvp,
 * using the type and rdev supplied in a_vap.
 */
static int
rump_vop_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	int rv;

	rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE, false);
	/* created over a whiteout: mark opaque for union mounts */
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	vput(dvp);
	return rv;
}
1090
/*
 * VOP_CREATE: create a regular file (or socket) under dvp.
 */
static int
rump_vop_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	}; */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	off_t newsize;
	int rv;

	/* sockets get a nominal size; regular files start out empty */
	newsize = va->va_type == VSOCK ? DEV_BSIZE : 0;
	rn = makeprivate(va->va_type, NODEV, newsize, false);
	/* created over a whiteout: mark opaque for union mounts */
	if ((cnp->cn_flags & ISWHITEOUT) != 0)
		rn->rn_va.va_flags |= UF_OPAQUE;
	rv = makevnode(dvp->v_mount, rn, vpp);
	if (rv)
		goto out;

	makedir(rnd, cnp, rn);

 out:
	vput(dvp);
	return rv;
}
1122
1123 static int
1124 rump_vop_symlink(void *v)
1125 {
1126 struct vop_symlink_args /* {
1127 struct vnode *a_dvp;
1128 struct vnode **a_vpp;
1129 struct componentname *a_cnp;
1130 struct vattr *a_vap;
1131 char *a_target;
1132 }; */ *ap = v;
1133 struct vnode *dvp = ap->a_dvp;
1134 struct vnode **vpp = ap->a_vpp;
1135 struct componentname *cnp = ap->a_cnp;
1136 struct rumpfs_node *rnd = dvp->v_data, *rn;
1137 const char *target = ap->a_target;
1138 size_t linklen;
1139 int rv;
1140
1141 linklen = strlen(target);
1142 KASSERT(linklen < MAXPATHLEN);
1143 rn = makeprivate(VLNK, NODEV, linklen, false);
1144 if ((cnp->cn_flags & ISWHITEOUT) != 0)
1145 rn->rn_va.va_flags |= UF_OPAQUE;
1146 rv = makevnode(dvp->v_mount, rn, vpp);
1147 if (rv)
1148 goto out;
1149
1150 makedir(rnd, cnp, rn);
1151
1152 KASSERT(linklen < MAXPATHLEN);
1153 rn->rn_linktarg = PNBUF_GET();
1154 rn->rn_linklen = linklen;
1155 strcpy(rn->rn_linktarg, target);
1156
1157 out:
1158 vput(dvp);
1159 return rv;
1160 }
1161
1162 static int
1163 rump_vop_readlink(void *v)
1164 {
1165 struct vop_readlink_args /* {
1166 struct vnode *a_vp;
1167 struct uio *a_uio;
1168 kauth_cred_t a_cred;
1169 }; */ *ap = v;
1170 struct vnode *vp = ap->a_vp;
1171 struct rumpfs_node *rn = vp->v_data;
1172 struct uio *uio = ap->a_uio;
1173
1174 return uiomove(rn->rn_linktarg, rn->rn_linklen, uio);
1175 }
1176
/*
 * VOP_WHITEOUT: query/create/delete a whiteout entry in dvp,
 * depending on a_flags (LOOKUP/CREATE/DELETE).
 */
static int
rump_vop_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct componentname *cnp = ap->a_cnp;
	int flags = ap->a_flags;

	switch (flags) {
	case LOOKUP:
		/* whiteout detection happens in rump_vop_lookup() */
		break;
	case CREATE:
		/* the magic node pointer marks the entry as a whiteout */
		makedir(rnd, cnp, RUMPFS_WHITEOUT);
		break;
	case DELETE:
		cnp->cn_flags &= ~DOWHITEOUT; /* cargo culting never fails ? */
		freedir(rnd, cnp);
		break;
	default:
		panic("unknown whiteout op %d", flags);
	}

	return 0;
}
1206
1207 static int
1208 rump_vop_open(void *v)
1209 {
1210 struct vop_open_args /* {
1211 struct vnode *a_vp;
1212 int a_mode;
1213 kauth_cred_t a_cred;
1214 } */ *ap = v;
1215 struct vnode *vp = ap->a_vp;
1216 struct rumpfs_node *rn = vp->v_data;
1217 int mode = ap->a_mode;
1218 int error = EINVAL;
1219
1220 if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0)
1221 return 0;
1222
1223 if (mode & FREAD) {
1224 if (rn->rn_readfd != -1)
1225 return 0;
1226 rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
1227 O_RDONLY, &error);
1228 }
1229
1230 if (mode & FWRITE) {
1231 if (rn->rn_writefd != -1)
1232 return 0;
1233 rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
1234 O_WRONLY, &error);
1235 }
1236
1237 return error;
1238 }
1239
/*
 * Simple readdir.  It omits the "." and ".." entries.  uio_offset is
 * (ab)used as a plain entry index into the directory list rather
 * than a byte offset.
 */
static int
rump_vop_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct rumpfs_node *rnd = vp->v_data;
	struct rumpfs_dent *rdent;
	unsigned i;	/* NOTE(review): compared against off_t uio_offset
			 * below -- signed/unsigned mix; confirm harmless */
	int rv = 0;

	/* seek to current entry */
	for (i = 0, rdent = LIST_FIRST(&rnd->rn_dir);
	    (i < uio->uio_offset) && rdent;
	    i++, rdent = LIST_NEXT(rdent, rd_entries))
		continue;
	if (!rdent)
		goto out;

	/* copy entries */
	for (; rdent && uio->uio_resid > 0;
	    rdent = LIST_NEXT(rdent, rd_entries), i++) {
		struct dirent dent;

		strlcpy(dent.d_name, rdent->rd_name, sizeof(dent.d_name));
		dent.d_namlen = strlen(dent.d_name);
		dent.d_reclen = _DIRENT_RECLEN(&dent, dent.d_namlen);

		if (__predict_false(RDENT_ISWHITEOUT(rdent))) {
			/* whiteouts report the reserved inode and DT_WHT */
			dent.d_fileno = INO_WHITEOUT;
			dent.d_type = DT_WHT;
		} else {
			dent.d_fileno = rdent->rd_node->rn_va.va_fileid;
			dent.d_type = vtype2dt(rdent->rd_node->rn_va.va_type);
		}

		/* entry doesn't fit: back up the index and stop */
		if (uio->uio_resid < dent.d_reclen) {
			i--;
			break;
		}

		rv = uiomove(&dent, dent.d_reclen, uio);
		if (rv) {
			i--;
			break;
		}
	}

 out:
	/* cookies are not supported */
	if (ap->a_cookies) {
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	}
	/* EOF iff we ran off the end of the entry list */
	if (rdent)
		*ap->a_eofflag = 0;
	else
		*ap->a_eofflag = 1;
	uio->uio_offset = i;

	return rv;
}
1309
/*
 * Read from the host file backing an etfs node: pread into a
 * temporary kernel buffer, then copy the data out to the uio.
 * Returns 0 on success or an errno value.
 */
static int
etread(struct rumpfs_node *rn, struct uio *uio)
{
	uint8_t *buf;
	size_t bufsize;
	ssize_t n;
	int error = 0;

	bufsize = uio->uio_resid;
	if (bufsize == 0)
		return 0;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	/*
	 * rn_offset biases the host offset (the node may map a
	 * sub-range of the host file).  On failure rumpuser_pread
	 * is expected to have set error.
	 */
	if ((n = rumpuser_pread(rn->rn_readfd, buf, bufsize,
	    uio->uio_offset + rn->rn_offset, &error)) == -1)
		goto out;
	KASSERT(n <= bufsize);
	error = uiomove(buf, n, uio);

 out:
	kmem_free(buf, bufsize);
	return error;

}
1333
/*
 * Read from a vnode.  Host-backed (etfs) files are read straight
 * from the host via etread(); in-memory files go through ubc.
 */
static int
rump_vop_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct uio *uio = ap->a_uio;
	const int advice = IO_ADV_DECODE(ap->a_ioflag);
	off_t chunk;
	int error = 0;

	if (vp->v_type == VDIR)
		return EISDIR;

	/* et op? */
	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
		return etread(rn, uio);

	/* otherwise, it's off to ubc with us */
	while (uio->uio_resid > 0) {
		/* clamp each transfer to the remaining file data */
		chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
		if (chunk == 0)
			break;
		error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
		    UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
		if (error)
			break;
	}

	return error;
}
1370
/*
 * Write to the host file backing an etfs node: copy the uio data
 * into a temporary kernel buffer and pwrite it to the host.
 * On a short host write, uio_resid is restored to reflect the
 * amount actually written.
 */
static int
etwrite(struct rumpfs_node *rn, struct uio *uio)
{
	uint8_t *buf;
	size_t bufsize;
	ssize_t n;
	int error = 0;

	bufsize = uio->uio_resid;
	if (bufsize == 0)
		return 0;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;
	KASSERT(uio->uio_resid == 0);
	/*
	 * uiomove advanced uio_offset past the data, so rewind by
	 * bufsize to get the starting offset; rn_offset biases into
	 * the host file.
	 */
	n = rumpuser_pwrite(rn->rn_writefd, buf, bufsize,
	    (uio->uio_offset-bufsize) + rn->rn_offset, &error);
	if (n >= 0) {
		KASSERT(n <= bufsize);
		uio->uio_resid = bufsize - n;
	}

 out:
	kmem_free(buf, bufsize);
	return error;
}
1398
1399 static int
1400 rump_vop_write(void *v)
1401 {
1402 struct vop_read_args /* {
1403 struct vnode *a_vp;
1404 struct uio *a_uio;
1405 int ioflags a_ioflag;
1406 kauth_cred_t a_cred;
1407 }; */ *ap = v;
1408 struct vnode *vp = ap->a_vp;
1409 struct rumpfs_node *rn = vp->v_data;
1410 struct uio *uio = ap->a_uio;
1411 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1412 void *olddata;
1413 size_t oldlen, newlen;
1414 off_t chunk;
1415 int error = 0;
1416 bool allocd = false;
1417
1418 if (ap->a_ioflag & IO_APPEND)
1419 uio->uio_offset = vp->v_size;
1420
1421 /* consult et? */
1422 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1423 return etwrite(rn, uio);
1424
1425 /*
1426 * Otherwise, it's a case of ubcmove.
1427 */
1428
1429 /*
1430 * First, make sure we have enough storage.
1431 *
1432 * No, you don't need to tell me it's not very efficient.
1433 * No, it doesn't really support sparse files, just fakes it.
1434 */
1435 newlen = uio->uio_offset + uio->uio_resid;
1436 oldlen = 0; /* XXXgcc */
1437 olddata = NULL;
1438 if (rn->rn_dlen < newlen) {
1439 oldlen = rn->rn_dlen;
1440 olddata = rn->rn_data;
1441
1442 rn->rn_data = rump_hypermalloc(newlen, 0, true, "rumpfs");
1443 rn->rn_dlen = newlen;
1444 memset(rn->rn_data, 0, newlen);
1445 memcpy(rn->rn_data, olddata, oldlen);
1446 allocd = true;
1447 uvm_vnp_setsize(vp, newlen);
1448 }
1449
1450 /* ok, we have enough stooorage. write */
1451 while (uio->uio_resid > 0) {
1452 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1453 if (chunk == 0)
1454 break;
1455 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1456 UBC_WRITE | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
1457 if (error)
1458 break;
1459 }
1460
1461 if (allocd) {
1462 if (error) {
1463 rump_hyperfree(rn->rn_data, newlen);
1464 rn->rn_data = olddata;
1465 rn->rn_dlen = oldlen;
1466 uvm_vnp_setsize(vp, oldlen);
1467 } else {
1468 rump_hyperfree(olddata, oldlen);
1469 }
1470 }
1471
1472 return error;
1473 }
1474
1475 static int
1476 rump_vop_bmap(void *v)
1477 {
1478 struct vop_bmap_args /* {
1479 struct vnode *a_vp;
1480 daddr_t a_bn;
1481 struct vnode **a_vpp;
1482 daddr_t *a_bnp;
1483 int *a_runp;
1484 } */ *ap = v;
1485
1486 /* 1:1 mapping */
1487 if (ap->a_vpp)
1488 *ap->a_vpp = ap->a_vp;
1489 if (ap->a_bnp)
1490 *ap->a_bnp = ap->a_bn;
1491 if (ap->a_runp)
1492 *ap->a_runp = 16;
1493
1494 return 0;
1495 }
1496
/*
 * I/O strategy routine.  Since in-memory file data lives in rn_data,
 * "device" I/O is just a memcpy between the buf and that buffer.
 * The error, if any, is delivered through the buf; the op itself
 * always returns 0.
 */
static int
rump_vop_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	struct buf *bp = ap->a_bp;
	off_t copylen, copyoff;
	int error;

	/* only in-memory regular files can be serviced here */
	if (vp->v_type != VREG || rn->rn_flags & RUMPNODE_ET_PHONE_HOST) {
		error = EINVAL;
		goto out;
	}

	copyoff = bp->b_blkno << DEV_BSHIFT;
	/*
	 * NOTE(review): copylen goes negative if copyoff lies beyond
	 * rn_dlen -- presumably callers never issue such I/O; verify.
	 */
	copylen = MIN(rn->rn_dlen - copyoff, bp->b_bcount);
	if (BUF_ISWRITE(bp)) {
		memcpy((uint8_t *)rn->rn_data + copyoff, bp->b_data, copylen);
	} else {
		/* zero-fill the part of the buffer past end of data */
		memset((uint8_t*)bp->b_data + copylen, 0, bp->b_bcount-copylen);
		memcpy(bp->b_data, (uint8_t *)rn->rn_data + copyoff, copylen);
	}
	bp->b_resid = 0;
	error = 0;

 out:
	bp->b_error = error;
	biodone(bp);
	return 0;
}
1531
1532 static int
1533 rump_vop_pathconf(void *v)
1534 {
1535 struct vop_pathconf_args /* {
1536 struct vnode *a_vp;
1537 int a_name;
1538 register_t *a_retval;
1539 }; */ *ap = v;
1540 int name = ap->a_name;
1541 register_t *retval = ap->a_retval;
1542
1543 switch (name) {
1544 case _PC_LINK_MAX:
1545 *retval = LINK_MAX;
1546 return 0;
1547 case _PC_NAME_MAX:
1548 *retval = RUMPFS_MAXNAMLEN;
1549 return 0;
1550 case _PC_PATH_MAX:
1551 *retval = PATH_MAX;
1552 return 0;
1553 case _PC_PIPE_BUF:
1554 *retval = PIPE_BUF;
1555 return 0;
1556 case _PC_CHOWN_RESTRICTED:
1557 *retval = 1;
1558 return 0;
1559 case _PC_NO_TRUNC:
1560 *retval = 1;
1561 return 0;
1562 case _PC_SYNC_IO:
1563 *retval = 1;
1564 return 0;
1565 case _PC_FILESIZEBITS:
1566 *retval = 43; /* this one goes to 11 */
1567 return 0;
1568 case _PC_SYMLINK_MAX:
1569 *retval = MAXPATHLEN;
1570 return 0;
1571 case _PC_2_SYMLINKS:
1572 *retval = 1;
1573 return 0;
1574 default:
1575 return EINVAL;
1576 }
1577 }
1578
/*
 * Catch-all no-op: an operation which always succeeds.  Used to
 * fill several vnodeop slots that need no work in rumpfs.
 */
static int
rump_vop_success(void *v)
{

	return 0;
}
1585
/*
 * Last reference to the vnode is gone.  For host-backed regular
 * files, close the lazily-opened host descriptors; errors from the
 * host close are deliberately ignored.  The node is offered for
 * recycling only if it was marked reclaimable.
 */
static int
rump_vop_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;
	int error;

	if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST && vp->v_type == VREG) {
		if (rn->rn_readfd != -1) {
			rumpuser_close(rn->rn_readfd, &error);
			rn->rn_readfd = -1;
		}
		if (rn->rn_writefd != -1) {
			rumpuser_close(rn->rn_writefd, &error);
			rn->rn_writefd = -1;
		}
	}
	*ap->a_recycle = (rn->rn_flags & RUMPNODE_CANRECLAIM) ? true : false;

	VOP_UNLOCK(vp);
	return 0;
}
1612
/*
 * Disassociate the vnode from its rumpfs node.  The node itself is
 * freed only if it was marked reclaimable; otherwise it stays
 * around and can be attached to a fresh vnode later.
 */
static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	/* reclock serializes rn_vp access with vnode lookup */
	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	genfs_node_destroy(vp);
	vp->v_data = NULL;

	if (rn->rn_flags & RUMPNODE_CANRECLAIM) {
		/* symlink targets live in a pathname buffer */
		if (vp->v_type == VLNK)
			PNBUF_PUT(rn->rn_linktarg);
		if (rn->rn_hostpath)
			free(rn->rn_hostpath, M_TEMP);
		kmem_free(rn, sizeof(*rn));
	}

	return 0;
}
1638
/*
 * Operation dispatcher for special files: most operations go to the
 * generic specfs vector, but the listed ones must be handled by the
 * rumpfs vector so rumpfs-internal state stays consistent.
 */
static int
rump_vop_spec(void *v)
{
	struct vop_generic_args *ap = v;
	int (**opvec)(void *);

	switch (ap->a_desc->vdesc_offset) {
	case VOP_ACCESS_DESCOFFSET:
	case VOP_GETATTR_DESCOFFSET:
	case VOP_SETATTR_DESCOFFSET:
	case VOP_LOCK_DESCOFFSET:
	case VOP_UNLOCK_DESCOFFSET:
	case VOP_ISLOCKED_DESCOFFSET:
	case VOP_RECLAIM_DESCOFFSET:
		opvec = rump_vnodeop_p;
		break;
	default:
		opvec = spec_vnodeop_p;
		break;
	}

	/* forward the call to the chosen vector */
	return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
}
1662
/*
 * Advisory record locking: delegate to the generic lockf code,
 * using the per-node lock list.
 */
static int
rump_vop_advlock(void *v)
{
	struct vop_advlock_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		void *a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	return lf_advlock(ap, &rn->rn_lockf, vp->v_size);
}
1679
1680 /*
1681 * Begin vfs-level stuff
1682 */
1683
VFS_PROTOS(rumpfs);
/* vfs operations vector; unsupported operations are stubbed out */
struct vfsops rumpfs_vfsops = {
	.vfs_name =		MOUNT_RUMPFS,
	.vfs_min_mount_data = 	0,
	.vfs_mount =		rumpfs_mount,
	.vfs_start =		(void *)nullop,
	.vfs_unmount =		rumpfs_unmount,
	.vfs_root =		rumpfs_root,
	.vfs_quotactl =		(void *)eopnotsupp,
	.vfs_statvfs =		genfs_statvfs,
	.vfs_sync =		(void *)nullop,
	.vfs_vget =		rumpfs_vget,
	.vfs_fhtovp =		(void *)eopnotsupp,
	.vfs_vptofh =		(void *)eopnotsupp,
	.vfs_init =		rumpfs_init,
	.vfs_reinit =		NULL,
	.vfs_done =		rumpfs_done,
	.vfs_mountroot =	rumpfs_mountroot,
	.vfs_snapshot =		(void *)eopnotsupp,
	.vfs_extattrctl =	(void *)eopnotsupp,
	.vfs_suspendctl =	(void *)eopnotsupp,
	.vfs_renamelock_enter =	genfs_renamelock_enter,
	.vfs_renamelock_exit =	genfs_renamelock_exit,
	.vfs_opv_descs =	rump_opv_descs,
	/* vfs_refcount */
	/* vfs_list */
};
1711
1712 static int
1713 rumpfs_mountfs(struct mount *mp)
1714 {
1715 struct rumpfs_mount *rfsmp;
1716 struct rumpfs_node *rn;
1717 int error;
1718
1719 rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);
1720
1721 rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
1722 rn->rn_parent = rn;
1723 if ((error = makevnode(mp, rn, &rfsmp->rfsmp_rvp)) != 0)
1724 return error;
1725
1726 rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;
1727 VOP_UNLOCK(rfsmp->rfsmp_rvp);
1728
1729 mp->mnt_data = rfsmp;
1730 mp->mnt_stat.f_namemax = RUMPFS_MAXNAMLEN;
1731 mp->mnt_stat.f_iosize = 512;
1732 mp->mnt_flag |= MNT_LOCAL;
1733 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
1734 mp->mnt_fs_bshift = DEV_BSHIFT;
1735 vfs_getnewfsid(mp);
1736
1737 return 0;
1738 }
1739
/*
 * Mount entry point.  Updates are accepted as no-ops; a fresh mount
 * records the statvfs information and sets up the file system.
 */
int
rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
{
	int error;

	/* nothing to renegotiate on update */
	if (mp->mnt_flag & MNT_UPDATE) {
		return 0;
	}

	error = set_statvfs_info(mntpath, UIO_USERSPACE, "rumpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error)
		return error;

	return rumpfs_mountfs(mp);
}
1756
/*
 * Unmount: flush all vnodes except the root, then get rid of the
 * root vnode and release the per-mount structure.
 */
int
rumpfs_unmount(struct mount *mp, int mntflags)
{
	struct rumpfs_mount *rfsmp = mp->mnt_data;
	int flags = 0, error;

	if (panicstr || mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if ((error = vflush(mp, rfsmp->rfsmp_rvp, flags)) != 0)
		return error;
	vgone(rfsmp->rfsmp_rvp); /* XXX */

	kmem_free(rfsmp, sizeof(*rfsmp));

	return 0;
}
1774
1775 int
1776 rumpfs_root(struct mount *mp, struct vnode **vpp)
1777 {
1778 struct rumpfs_mount *rfsmp = mp->mnt_data;
1779
1780 vref(rfsmp->rfsmp_rvp);
1781 vn_lock(rfsmp->rfsmp_rvp, LK_EXCLUSIVE | LK_RETRY);
1782 *vpp = rfsmp->rfsmp_rvp;
1783 return 0;
1784 }
1785
/*
 * Inode-number based vnode lookup is not supported by rumpfs.
 */
int
rumpfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return EOPNOTSUPP;
}
1792
1793 void
1794 rumpfs_init()
1795 {
1796
1797 CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);
1798
1799 mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
1800 mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);
1801 }
1802
1803 void
1804 rumpfs_done()
1805 {
1806
1807 mutex_destroy(&reclock);
1808 mutex_destroy(&etfs_lock);
1809 }
1810
1811 int
1812 rumpfs_mountroot()
1813 {
1814 struct mount *mp;
1815 int error;
1816
1817 if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
1818 vrele(rootvp);
1819 return error;
1820 }
1821
1822 if ((error = rumpfs_mountfs(mp)) != 0)
1823 panic("mounting rootfs failed: %d", error);
1824
1825 mutex_enter(&mountlist_lock);
1826 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
1827 mutex_exit(&mountlist_lock);
1828
1829 error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
1830 mp->mnt_op->vfs_name, mp, curlwp);
1831 if (error)
1832 panic("set_statvfs_info failed for rootfs: %d", error);
1833
1834 mp->mnt_flag &= ~MNT_RDONLY;
1835 vfs_unbusy(mp, false, NULL);
1836
1837 return 0;
1838 }
1839