rumpfs.c revision 1.87 1 /* $NetBSD: rumpfs.c,v 1.87 2011/01/13 07:27:35 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2009, 2010 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.87 2011/01/13 07:27:35 pooka Exp $");
30
31 #include <sys/param.h>
32 #include <sys/atomic.h>
33 #include <sys/buf.h>
34 #include <sys/dirent.h>
35 #include <sys/errno.h>
36 #include <sys/filedesc.h>
37 #include <sys/fcntl.h>
38 #include <sys/kauth.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/lock.h>
44 #include <sys/lockf.h>
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 #include <sys/syscallargs.h>
48 #include <sys/vnode.h>
49 #include <sys/unistd.h>
50
51 #include <miscfs/fifofs/fifo.h>
52 #include <miscfs/specfs/specdev.h>
53 #include <miscfs/genfs/genfs.h>
54 #include <miscfs/genfs/genfs_node.h>
55
56 #include <uvm/uvm_extern.h>
57
58 #include <rump/rumpuser.h>
59
60 #include "rump_private.h"
61 #include "rump_vfs_private.h"
62
63 static int rump_vop_lookup(void *);
64 static int rump_vop_getattr(void *);
65 static int rump_vop_setattr(void *);
66 static int rump_vop_mkdir(void *);
67 static int rump_vop_rmdir(void *);
68 static int rump_vop_remove(void *);
69 static int rump_vop_mknod(void *);
70 static int rump_vop_create(void *);
71 static int rump_vop_inactive(void *);
72 static int rump_vop_reclaim(void *);
73 static int rump_vop_success(void *);
74 static int rump_vop_readdir(void *);
75 static int rump_vop_spec(void *);
76 static int rump_vop_read(void *);
77 static int rump_vop_write(void *);
78 static int rump_vop_open(void *);
79 static int rump_vop_symlink(void *);
80 static int rump_vop_readlink(void *);
81 static int rump_vop_whiteout(void *);
82 static int rump_vop_pathconf(void *);
83 static int rump_vop_bmap(void *);
84 static int rump_vop_strategy(void *);
85 static int rump_vop_advlock(void *);
86 static int rump_vop_access(void *);
87
/*
 * Dummy fifo vector: rumpfs does not support fifos, so every fifo
 * operation falls through to vn_default_error.
 */
int (**fifo_vnodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };

/*
 * Main rumpfs vnode operations vector.  Operations not implemented
 * here are handled by generic code (genfs_*) or rejected.
 */
int (**rump_vnodeop_p)(void *);
const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, rump_vop_lookup },
	{ &vop_getattr_desc, rump_vop_getattr },
	{ &vop_setattr_desc, rump_vop_setattr },
	{ &vop_mkdir_desc, rump_vop_mkdir },
	{ &vop_rmdir_desc, rump_vop_rmdir },
	{ &vop_remove_desc, rump_vop_remove },
	{ &vop_mknod_desc, rump_vop_mknod },
	{ &vop_create_desc, rump_vop_create },
	{ &vop_symlink_desc, rump_vop_symlink },
	{ &vop_readlink_desc, rump_vop_readlink },
	{ &vop_access_desc, rump_vop_access },
	{ &vop_readdir_desc, rump_vop_readdir },
	{ &vop_read_desc, rump_vop_read },
	{ &vop_write_desc, rump_vop_write },
	{ &vop_open_desc, rump_vop_open },
	{ &vop_close_desc, genfs_nullop },
	{ &vop_seek_desc, genfs_seek },
	{ &vop_getpages_desc, genfs_getpages },
	{ &vop_putpages_desc, genfs_putpages },
	{ &vop_whiteout_desc, rump_vop_whiteout },
	{ &vop_fsync_desc, rump_vop_success },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_islocked_desc, genfs_islocked },
	{ &vop_inactive_desc, rump_vop_inactive },
	{ &vop_reclaim_desc, rump_vop_reclaim },
	{ &vop_link_desc, genfs_eopnotsupp },
	{ &vop_pathconf_desc, rump_vop_pathconf },
	{ &vop_bmap_desc, rump_vop_bmap },
	{ &vop_strategy_desc, rump_vop_strategy },
	{ &vop_advlock_desc, rump_vop_advlock },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_vnodeop_opv_desc =
	{ &rump_vnodeop_p, rump_vnodeop_entries };

/*
 * Special-file vector: every operation on device nodes is routed
 * through rump_vop_spec, which dispatches to specfs.
 */
int (**rump_specop_p)(void *);
const struct vnodeopv_entry_desc rump_specop_entries[] = {
	{ &vop_default_desc, rump_vop_spec },
	{ NULL, NULL }
};
const struct vnodeopv_desc rump_specop_opv_desc =
	{ &rump_specop_p, rump_specop_entries };

const struct vnodeopv_desc * const rump_opv_descs[] = {
	&rump_vnodeop_opv_desc,
	&rump_specop_opv_desc,
	NULL
};
148
/* sentinel node pointer marking a whiteout directory entry */
#define RUMPFS_WHITEOUT ((void *)-1)
#define RDENT_ISWHITEOUT(rdp) (rdp->rd_node == RUMPFS_WHITEOUT)
/*
 * In-memory directory entry: a name and the node it refers to
 * (or RUMPFS_WHITEOUT for a whited-out name).
 */
struct rumpfs_dent {
	char *rd_name;
	int rd_namelen;		/* strlen(rd_name), cached */
	struct rumpfs_node *rd_node;

	LIST_ENTRY(rumpfs_dent) rd_entries;
};

struct genfs_ops rumpfs_genfsops = {
	.gop_size = genfs_size,
	.gop_write = genfs_gop_write,

	/* optional */
	.gop_alloc = NULL,
	.gop_markupdate = NULL,
};

/*
 * Per-file node.  The rn_u union is keyed on the vnode type:
 * reg for etfs-backed regular files, reg_noet for purely in-memory
 * regular files, dir for directories and link for symlinks.
 */
struct rumpfs_node {
	struct genfs_node rn_gn;
	struct vattr rn_va;		/* cached attributes */
	struct vnode *rn_vp;		/* associated vnode, or NULL */
	char *rn_hostpath;		/* host path for etfs nodes */
	int rn_flags;			/* RUMPNODE_* below */
	struct lockf *rn_lockf;

	union {
		struct { /* VREG */
			int readfd;		/* host fd, -1 if not open */
			int writefd;		/* host fd, -1 if not open */
			uint64_t offset;	/* window offset in host file */
		} reg;
		struct {
			void *data;		/* in-memory file contents */
			size_t dlen;		/* length of data */
		} reg_noet;
		struct { /* VDIR */
			LIST_HEAD(, rumpfs_dent) dents;
			struct rumpfs_node *parent;
			int flags;
		} dir;
		struct {
			char *target;		/* symlink target string */
			size_t len;		/* strlen(target) */
		} link;
	} rn_u;
};
#define rn_readfd	rn_u.reg.readfd
#define rn_writefd	rn_u.reg.writefd
#define rn_offset	rn_u.reg.offset
#define rn_data		rn_u.reg_noet.data
#define rn_dlen		rn_u.reg_noet.dlen
#define rn_dir		rn_u.dir.dents
#define rn_parent	rn_u.dir.parent
#define rn_linktarg	rn_u.link.target
#define rn_linklen	rn_u.link.len

#define RUMPNODE_CANRECLAIM	0x01	/* ok to recycle on reclaim */
#define RUMPNODE_DIR_ET		0x02	/* dir entries come from host */
#define RUMPNODE_DIR_ETSUBS	0x04	/* subdirs are also et dirs */
#define RUMPNODE_ET_PHONE_HOST	0x10	/* I/O goes to the host file */

struct rumpfs_mount {
	struct vnode *rfsmp_rvp;	/* root vnode */
};

static struct rumpfs_node *makeprivate(enum vtype, dev_t, off_t, bool);

/*
 * Extra Terrestrial stuff.  We map a given key (pathname) to a file on
 * the host FS.  ET phones home only from the root node of rumpfs.
 *
 * When an etfs node is removed, a vnode potentially behind it is not
 * immediately recycled.
 */

struct etfs {
	char et_key[MAXPATHLEN];	/* lookup key (leading '/' stripped) */
	size_t et_keylen;		/* strlen(et_key), cached */
	bool et_prefixkey;		/* match key as a path prefix */
	bool et_removing;		/* removal in progress, see remove */
	devminor_t et_blkmin;		/* rumpblk minor, -1 if none */

	LIST_ENTRY(etfs) et_entries;

	struct rumpfs_node *et_rn;
};
static kmutex_t etfs_lock;		/* protects etfs_list and flags above */
static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);
239
240 static enum vtype
241 ettype_to_vtype(enum rump_etfs_type et)
242 {
243 enum vtype vt;
244
245 switch (et) {
246 case RUMP_ETFS_REG:
247 vt = VREG;
248 break;
249 case RUMP_ETFS_BLK:
250 vt = VBLK;
251 break;
252 case RUMP_ETFS_CHR:
253 vt = VCHR;
254 break;
255 case RUMP_ETFS_DIR:
256 vt = VDIR;
257 break;
258 case RUMP_ETFS_DIR_SUBDIRS:
259 vt = VDIR;
260 break;
261 default:
262 panic("invalid et type: %d", et);
263 }
264
265 return vt;
266 }
267
268 static enum vtype
269 hft_to_vtype(int hft)
270 {
271 enum vtype vt;
272
273 switch (hft) {
274 case RUMPUSER_FT_OTHER:
275 vt = VNON;
276 break;
277 case RUMPUSER_FT_DIR:
278 vt = VDIR;
279 break;
280 case RUMPUSER_FT_REG:
281 vt = VREG;
282 break;
283 case RUMPUSER_FT_BLK:
284 vt = VBLK;
285 break;
286 case RUMPUSER_FT_CHR:
287 vt = VCHR;
288 break;
289 default:
290 vt = VNON;
291 break;
292 }
293
294 return vt;
295 }
296
297 static bool
298 etfs_find(const char *key, struct etfs **etp, bool forceprefix)
299 {
300 struct etfs *et;
301 size_t keylen = strlen(key);
302
303 KASSERT(mutex_owned(&etfs_lock));
304
305 LIST_FOREACH(et, &etfs_list, et_entries) {
306 if ((keylen == et->et_keylen || et->et_prefixkey || forceprefix)
307 && strncmp(key, et->et_key, et->et_keylen) == 0) {
308 if (etp)
309 *etp = et;
310 return true;
311 }
312 }
313
314 return false;
315 }
316
317 #define REGDIR(ftype) \
318 ((ftype) == RUMP_ETFS_DIR || (ftype) == RUMP_ETFS_DIR_SUBDIRS)
319 static int
320 doregister(const char *key, const char *hostpath,
321 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
322 {
323 char buf[9];
324 struct etfs *et;
325 struct rumpfs_node *rn;
326 uint64_t fsize;
327 dev_t rdev = NODEV;
328 devminor_t dmin = -1;
329 int hft, error;
330
331 if (key[0] != '/') {
332 return EINVAL;
333 }
334 while (key[0] == '/') {
335 key++;
336 }
337
338 if (rumpuser_getfileinfo(hostpath, &fsize, &hft, &error))
339 return error;
340
341 /* etfs directory requires a directory on the host */
342 if (REGDIR(ftype)) {
343 if (hft != RUMPUSER_FT_DIR)
344 return ENOTDIR;
345 if (begin != 0)
346 return EISDIR;
347 if (size != RUMP_ETFS_SIZE_ENDOFF)
348 return EISDIR;
349 size = fsize;
350 } else {
351 if (begin > fsize)
352 return EINVAL;
353 if (size == RUMP_ETFS_SIZE_ENDOFF)
354 size = fsize - begin;
355 if (begin + size > fsize)
356 return EINVAL;
357 }
358
359 if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
360 error = rumpblk_register(hostpath, &dmin, begin, size);
361 if (error != 0) {
362 return error;
363 }
364 rdev = makedev(RUMPBLK_DEVMAJOR, dmin);
365 }
366
367 et = kmem_alloc(sizeof(*et), KM_SLEEP);
368 strcpy(et->et_key, key);
369 et->et_keylen = strlen(et->et_key);
370 et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), rdev, size, true);
371 et->et_removing = false;
372 et->et_blkmin = dmin;
373
374 rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
375
376 if (ftype == RUMP_ETFS_REG || REGDIR(ftype) || et->et_blkmin != -1) {
377 size_t len = strlen(hostpath)+1;
378
379 rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
380 memcpy(rn->rn_hostpath, hostpath, len);
381 rn->rn_offset = begin;
382 }
383
384 if (REGDIR(ftype)) {
385 rn->rn_flags |= RUMPNODE_DIR_ET;
386 et->et_prefixkey = true;
387 } else {
388 et->et_prefixkey = false;
389 }
390
391 if (ftype == RUMP_ETFS_DIR_SUBDIRS)
392 rn->rn_flags |= RUMPNODE_DIR_ETSUBS;
393
394 mutex_enter(&etfs_lock);
395 if (etfs_find(key, NULL, REGDIR(ftype))) {
396 mutex_exit(&etfs_lock);
397 if (et->et_blkmin != -1)
398 rumpblk_deregister(hostpath);
399 if (et->et_rn->rn_hostpath != NULL)
400 free(et->et_rn->rn_hostpath, M_TEMP);
401 kmem_free(et->et_rn, sizeof(*et->et_rn));
402 kmem_free(et, sizeof(*et));
403 return EEXIST;
404 }
405 LIST_INSERT_HEAD(&etfs_list, et, et_entries);
406 mutex_exit(&etfs_lock);
407
408 if (ftype == RUMP_ETFS_BLK) {
409 format_bytes(buf, sizeof(buf), size);
410 aprint_verbose("/%s: hostpath %s (%s)\n", key, hostpath, buf);
411 }
412
413 return 0;
414 }
415 #undef REGDIR
416
417 int
418 rump_etfs_register(const char *key, const char *hostpath,
419 enum rump_etfs_type ftype)
420 {
421
422 return doregister(key, hostpath, ftype, 0, RUMP_ETFS_SIZE_ENDOFF);
423 }
424
425 int
426 rump_etfs_register_withsize(const char *key, const char *hostpath,
427 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
428 {
429
430 return doregister(key, hostpath, ftype, begin, size);
431 }
432
/* remove etfs mapping. caller's responsibility to make sure it's not in use */
int
rump_etfs_remove(const char *key)
{
	struct etfs *et;
	size_t keylen;
	int rv;

	if (key[0] != '/') {
		return EINVAL;
	}
	while (key[0] == '/') {
		key++;
	}

	keylen = strlen(key);

	/*
	 * Find the entry and mark it as being removed.  The flag
	 * prevents two concurrent removers from both proceeding past
	 * this point for the same entry.
	 */
	mutex_enter(&etfs_lock);
	LIST_FOREACH(et, &etfs_list, et_entries) {
		if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
			if (et->et_removing)
				et = NULL;	/* lost the race */
			else
				et->et_removing = true;
			break;
		}
	}
	mutex_exit(&etfs_lock);
	if (!et)
		return ENOENT;

	/*
	 * ok, we know what we want to remove and have signalled there
	 * actually are men at work. first, unregister from rumpblk
	 */
	if (et->et_blkmin != -1) {
		rv = rumpblk_deregister(et->et_rn->rn_hostpath);
	} else {
		rv = 0;
	}
	KASSERT(rv == 0);

	/* then do the actual removal */
	mutex_enter(&etfs_lock);
	LIST_REMOVE(et, et_entries);
	mutex_exit(&etfs_lock);

	/* node is unreachable, safe to nuke all device copies */
	if (et->et_blkmin != -1)
		vdevgone(RUMPBLK_DEVMAJOR, et->et_blkmin, et->et_blkmin, VBLK);

	if (et->et_rn->rn_hostpath != NULL)
		free(et->et_rn->rn_hostpath, M_TEMP);
	kmem_free(et->et_rn, sizeof(*et->et_rn));
	kmem_free(et, sizeof(*et));

	return 0;
}
491
492 /*
493 * rumpfs
494 */
495
496 #define INO_WHITEOUT 1
497 static int lastino = 2;
498 static kmutex_t reclock;
499
500 static struct rumpfs_node *
501 makeprivate(enum vtype vt, dev_t rdev, off_t size, bool et)
502 {
503 struct rumpfs_node *rn;
504 struct vattr *va;
505 struct timespec ts;
506
507 rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);
508
509 switch (vt) {
510 case VDIR:
511 LIST_INIT(&rn->rn_dir);
512 break;
513 case VREG:
514 if (et) {
515 rn->rn_readfd = -1;
516 rn->rn_writefd = -1;
517 }
518 break;
519 default:
520 break;
521 }
522
523 nanotime(&ts);
524
525 va = &rn->rn_va;
526 va->va_type = vt;
527 va->va_mode = 0755;
528 if (vt == VDIR)
529 va->va_nlink = 2;
530 else
531 va->va_nlink = 1;
532 va->va_uid = 0;
533 va->va_gid = 0;
534 va->va_fsid =
535 va->va_fileid = atomic_inc_uint_nv(&lastino);
536 va->va_size = size;
537 va->va_blocksize = 512;
538 va->va_atime = ts;
539 va->va_mtime = ts;
540 va->va_ctime = ts;
541 va->va_birthtime = ts;
542 va->va_gen = 0;
543 va->va_flags = 0;
544 va->va_rdev = rdev;
545 va->va_bytes = 512;
546 va->va_filerev = 0;
547 va->va_vaflags = 0;
548
549 return rn;
550 }
551
/*
 * Create a vnode for the given rumpfs node and attach the two to
 * each other.  On success the new vnode is returned locked in *vpp.
 */
static int
makevnode(struct mount *mp, struct rumpfs_node *rn, struct vnode **vpp)
{
	struct vnode *vp;
	int (**vpops)(void *);
	struct vattr *va = &rn->rn_va;
	int rv;

	/* getnewvnode may sleep; we must not hold reclock over it */
	KASSERT(!mutex_owned(&reclock));

	/* device nodes get the spec vector, everything else ours */
	if (va->va_type == VCHR || va->va_type == VBLK) {
		vpops = rump_specop_p;
	} else {
		vpops = rump_vnodeop_p;
	}

	rv = getnewvnode(VT_RUMP, mp, vpops, &vp);
	if (rv)
		return rv;

	vp->v_size = vp->v_writesize = va->va_size;
	vp->v_type = va->va_type;

	if (vpops == rump_specop_p) {
		spec_node_init(vp, va->va_rdev);
	}
	vp->v_data = rn;

	genfs_node_init(vp, &rumpfs_genfsops);
	vn_lock(vp, LK_RETRY | LK_EXCLUSIVE);
	/* publish under reclock so lookup/reclaim see a consistent rn_vp */
	mutex_enter(&reclock);
	rn->rn_vp = vp;
	mutex_exit(&reclock);

	*vpp = vp;

	return 0;
}
590
591
592 static void
593 makedir(struct rumpfs_node *rnd,
594 struct componentname *cnp, struct rumpfs_node *rn)
595 {
596 struct rumpfs_dent *rdent;
597
598 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
599 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
600 rdent->rd_node = rn;
601 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
602 rdent->rd_namelen = strlen(rdent->rd_name);
603
604 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
605 }
606
607 static void
608 freedir(struct rumpfs_node *rnd, struct componentname *cnp)
609 {
610 struct rumpfs_dent *rd = NULL;
611
612 LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
613 if (rd->rd_namelen == cnp->cn_namelen &&
614 strncmp(rd->rd_name, cnp->cn_nameptr,
615 cnp->cn_namelen) == 0)
616 break;
617 }
618 if (rd == NULL)
619 panic("could not find directory entry: %s", cnp->cn_nameptr);
620
621 LIST_REMOVE(rd, rd_entries);
622 kmem_free(rd->rd_name, rd->rd_namelen+1);
623 kmem_free(rd, sizeof(*rd));
624 }
625
/*
 * Simple lookup for rump file systems.
 *
 * uhm, this is twisted.  C F C C, hope of C C F C looming
 */
static int
rump_vop_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	}; */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *vp;
	struct rumpfs_node *rnd = dvp->v_data, *rn;
	struct rumpfs_dent *rd = NULL;
	struct etfs *et;
	bool dotdot = (cnp->cn_flags & ISDOTDOT) != 0;
	int rv = 0;

	*vpp = NULL;

	/* refuse last-component delete/rename on a read-only mount */
	if ((cnp->cn_flags & ISLASTCN)
	    && (dvp->v_mount->mnt_flag & MNT_RDONLY)
	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return EROFS;

	/* check for dot, return directly if the case */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	/* we don't do rename */
	if (!(((cnp->cn_flags & ISLASTCN) == 0) || (cnp->cn_nameiop != RENAME)))
		return EOPNOTSUPP;

	/*
	 * check for etfs: et mappings are visible only from the root
	 * vnode and only for plain LOOKUP operations
	 */
	if (dvp == rootvnode && cnp->cn_nameiop == LOOKUP) {
		bool found;
		mutex_enter(&etfs_lock);
		found = etfs_find(cnp->cn_nameptr, &et, false);
		mutex_exit(&etfs_lock);

		if (found) {
			rn = et->et_rn;
			/*
			 * a prefix key can span several path components;
			 * tell namei how many extra bytes we consumed
			 */
			cnp->cn_consume += et->et_keylen - cnp->cn_namelen;
			if (rn->rn_va.va_type != VDIR)
				cnp->cn_flags &= ~REQUIREDIR;
			goto getvnode;
		}
	}

	if (rnd->rn_flags & RUMPNODE_DIR_ET) {
		/* et directory: resolve the component on the host fs */
		uint64_t fsize;
		char *newpath;
		size_t newpathlen;
		int hft, error;

		if (dotdot)
			return EOPNOTSUPP;

		/* build "<dir hostpath>/<component>" */
		newpathlen = strlen(rnd->rn_hostpath) + 1 + cnp->cn_namelen + 1;
		newpath = malloc(newpathlen, M_TEMP, M_WAITOK);

		strlcpy(newpath, rnd->rn_hostpath, newpathlen);
		strlcat(newpath, "/", newpathlen);
		strlcat(newpath, cnp->cn_nameptr, newpathlen);

		if (rumpuser_getfileinfo(newpath, &fsize, &hft, &error)) {
			free(newpath, M_TEMP);
			return error;
		}

		/* allow only dirs and regular files */
		if (hft != RUMPUSER_FT_REG && hft != RUMPUSER_FT_DIR) {
			free(newpath, M_TEMP);
			return ENOENT;
		}

		/* create a fresh node shadowing the host file */
		rn = makeprivate(hft_to_vtype(hft), NODEV, fsize, true);
		rn->rn_flags |= RUMPNODE_CANRECLAIM;
		if (rnd->rn_flags & RUMPNODE_DIR_ETSUBS) {
			rn->rn_flags |= RUMPNODE_DIR_ET | RUMPNODE_DIR_ETSUBS;
			rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
		}
		rn->rn_hostpath = newpath;

		goto getvnode;
	} else {
		if (dotdot) {
			rn = rnd->rn_parent;
			goto getvnode;
		} else {
			/* linear scan of the in-memory directory */
			LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
				if (rd->rd_namelen == cnp->cn_namelen &&
				    strncmp(rd->rd_name, cnp->cn_nameptr,
				      cnp->cn_namelen) == 0)
					break;
			}
		}
	}

	if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
		return ENOENT;

	if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
		if (dvp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		/* tell the caller to go ahead and create the file */
		return EJUSTRETURN;
	}

	/* rd is non-NULL here: a whiteout hides the name */
	if (RDENT_ISWHITEOUT(rd)) {
		cnp->cn_flags |= ISWHITEOUT;
		return ENOENT;
	}

	rn = rd->rd_node;

 getvnode:
	KASSERT(rn);
	/* dotdot: drop the lock on dvp to avoid a lock-order reversal */
	if (dotdot)
		VOP_UNLOCK(dvp);
	mutex_enter(&reclock);
	if ((vp = rn->rn_vp)) {
		/* existing vnode: grab a reference, retry if it raced away */
		mutex_enter(&vp->v_interlock);
		mutex_exit(&reclock);
		if (vget(vp, LK_EXCLUSIVE)) {
			/*
			 * NOTE(review): dvp is relocked here even for
			 * non-dotdot lookups before retrying — verify
			 * this is intentional for the retry path.
			 */
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
			goto getvnode;
		}
		*vpp = vp;
	} else {
		mutex_exit(&reclock);
		rv = makevnode(dvp->v_mount, rn, vpp);
	}
	if (dotdot)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	return rv;
}
771
772 int
773 rump_vop_access(void *v)
774 {
775 struct vop_access_args /* {
776 const struct vnodeop_desc *a_desc;
777 struct vnode *a_vp;
778 int a_mode;
779 kauth_cred_t a_cred;
780 } */ *ap = v;
781 struct vnode *vp = ap->a_vp;
782 int mode = ap->a_mode;
783
784 if (mode & VWRITE) {
785 switch (vp->v_type) {
786 case VDIR:
787 case VLNK:
788 case VREG:
789 if ((vp->v_mount->mnt_flag & MNT_RDONLY))
790 return EROFS;
791 break;
792 default:
793 break;
794 }
795 }
796
797 return 0;
798 }
799
800 static int
801 rump_vop_getattr(void *v)
802 {
803 struct vop_getattr_args /* {
804 struct vnode *a_vp;
805 struct vattr *a_vap;
806 kauth_cred_t a_cred;
807 } */ *ap = v;
808 struct vnode *vp = ap->a_vp;
809 struct rumpfs_node *rn = vp->v_data;
810 struct vattr *vap = ap->a_vap;
811
812 memcpy(vap, &rn->rn_va, sizeof(struct vattr));
813 vap->va_size = vp->v_size;
814 return 0;
815 }
816
817 static int
818 rump_vop_setattr(void *v)
819 {
820 struct vop_getattr_args /* {
821 struct vnode *a_vp;
822 struct vattr *a_vap;
823 kauth_cred_t a_cred;
824 } */ *ap = v;
825 struct vnode *vp = ap->a_vp;
826 struct vattr *vap = ap->a_vap;
827 struct rumpfs_node *rn = vp->v_data;
828
829 #define SETIFVAL(a,t) if (vap->a != (t)VNOVAL) rn->rn_va.a = vap->a
830 SETIFVAL(va_mode, mode_t);
831 SETIFVAL(va_uid, uid_t);
832 SETIFVAL(va_gid, gid_t);
833 SETIFVAL(va_atime.tv_sec, time_t);
834 SETIFVAL(va_ctime.tv_sec, time_t);
835 SETIFVAL(va_mtime.tv_sec, time_t);
836 SETIFVAL(va_birthtime.tv_sec, time_t);
837 SETIFVAL(va_atime.tv_nsec, long);
838 SETIFVAL(va_ctime.tv_nsec, long);
839 SETIFVAL(va_mtime.tv_nsec, long);
840 SETIFVAL(va_birthtime.tv_nsec, long);
841 SETIFVAL(va_flags, u_long);
842 #undef SETIFVAL
843
844 if (vp->v_type == VREG && vap->va_size != VSIZENOTSET)
845 uvm_vnp_setsize(vp, vap->va_size);
846 return 0;
847 }
848
849 static int
850 rump_vop_mkdir(void *v)
851 {
852 struct vop_mkdir_args /* {
853 struct vnode *a_dvp;
854 struct vnode **a_vpp;
855 struct componentname *a_cnp;
856 struct vattr *a_vap;
857 }; */ *ap = v;
858 struct vnode *dvp = ap->a_dvp;
859 struct vnode **vpp = ap->a_vpp;
860 struct componentname *cnp = ap->a_cnp;
861 struct rumpfs_node *rnd = dvp->v_data, *rn;
862 int rv = 0;
863
864 rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
865 rn->rn_parent = rnd;
866 rv = makevnode(dvp->v_mount, rn, vpp);
867 if (rv)
868 goto out;
869
870 makedir(rnd, cnp, rn);
871
872 out:
873 vput(dvp);
874 return rv;
875 }
876
877 static int
878 rump_vop_rmdir(void *v)
879 {
880 struct vop_rmdir_args /* {
881 struct vnode *a_dvp;
882 struct vnode *a_vp;
883 struct componentname *a_cnp;
884 }; */ *ap = v;
885 struct vnode *dvp = ap->a_dvp;
886 struct vnode *vp = ap->a_vp;
887 struct componentname *cnp = ap->a_cnp;
888 struct rumpfs_node *rnd = dvp->v_data;
889 struct rumpfs_node *rn = vp->v_data;
890 int rv = 0;
891
892 if (!LIST_EMPTY(&rn->rn_dir)) {
893 rv = ENOTEMPTY;
894 goto out;
895 }
896
897 freedir(rnd, cnp);
898 rn->rn_flags |= RUMPNODE_CANRECLAIM;
899
900 out:
901 vput(dvp);
902 vput(vp);
903
904 return rv;
905 }
906
907 static int
908 rump_vop_remove(void *v)
909 {
910 struct vop_rmdir_args /* {
911 struct vnode *a_dvp;
912 struct vnode *a_vp;
913 struct componentname *a_cnp;
914 }; */ *ap = v;
915 struct vnode *dvp = ap->a_dvp;
916 struct vnode *vp = ap->a_vp;
917 struct componentname *cnp = ap->a_cnp;
918 struct rumpfs_node *rnd = dvp->v_data;
919 struct rumpfs_node *rn = vp->v_data;
920 int rv = 0;
921
922 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
923 return EOPNOTSUPP;
924
925 if (vp->v_type == VREG) {
926 rump_hyperfree(rn->rn_data, rn->rn_dlen);
927 }
928
929 freedir(rnd, cnp);
930 rn->rn_flags |= RUMPNODE_CANRECLAIM;
931
932 vput(dvp);
933 vput(vp);
934
935 return rv;
936 }
937
938 static int
939 rump_vop_mknod(void *v)
940 {
941 struct vop_mknod_args /* {
942 struct vnode *a_dvp;
943 struct vnode **a_vpp;
944 struct componentname *a_cnp;
945 struct vattr *a_vap;
946 }; */ *ap = v;
947 struct vnode *dvp = ap->a_dvp;
948 struct vnode **vpp = ap->a_vpp;
949 struct componentname *cnp = ap->a_cnp;
950 struct vattr *va = ap->a_vap;
951 struct rumpfs_node *rnd = dvp->v_data, *rn;
952 int rv;
953
954 rn = makeprivate(va->va_type, va->va_rdev, DEV_BSIZE, false);
955 rv = makevnode(dvp->v_mount, rn, vpp);
956 if (rv)
957 goto out;
958
959 makedir(rnd, cnp, rn);
960
961 out:
962 vput(dvp);
963 return rv;
964 }
965
966 static int
967 rump_vop_create(void *v)
968 {
969 struct vop_create_args /* {
970 struct vnode *a_dvp;
971 struct vnode **a_vpp;
972 struct componentname *a_cnp;
973 struct vattr *a_vap;
974 }; */ *ap = v;
975 struct vnode *dvp = ap->a_dvp;
976 struct vnode **vpp = ap->a_vpp;
977 struct componentname *cnp = ap->a_cnp;
978 struct vattr *va = ap->a_vap;
979 struct rumpfs_node *rnd = dvp->v_data, *rn;
980 off_t newsize;
981 int rv;
982
983 newsize = va->va_type == VSOCK ? DEV_BSIZE : 0;
984 rn = makeprivate(va->va_type, NODEV, newsize, false);
985 rv = makevnode(dvp->v_mount, rn, vpp);
986 if (rv)
987 goto out;
988
989 makedir(rnd, cnp, rn);
990
991 out:
992 vput(dvp);
993 return rv;
994 }
995
996 static int
997 rump_vop_symlink(void *v)
998 {
999 struct vop_symlink_args /* {
1000 struct vnode *a_dvp;
1001 struct vnode **a_vpp;
1002 struct componentname *a_cnp;
1003 struct vattr *a_vap;
1004 char *a_target;
1005 }; */ *ap = v;
1006 struct vnode *dvp = ap->a_dvp;
1007 struct vnode **vpp = ap->a_vpp;
1008 struct componentname *cnp = ap->a_cnp;
1009 struct rumpfs_node *rnd = dvp->v_data, *rn;
1010 const char *target = ap->a_target;
1011 size_t linklen;
1012 int rv;
1013
1014 linklen = strlen(target);
1015 KASSERT(linklen < MAXPATHLEN);
1016 rn = makeprivate(VLNK, NODEV, linklen, false);
1017 rv = makevnode(dvp->v_mount, rn, vpp);
1018 if (rv)
1019 goto out;
1020
1021 makedir(rnd, cnp, rn);
1022
1023 KASSERT(linklen < MAXPATHLEN);
1024 rn->rn_linktarg = PNBUF_GET();
1025 rn->rn_linklen = linklen;
1026 strcpy(rn->rn_linktarg, target);
1027
1028 out:
1029 vput(dvp);
1030 return rv;
1031 }
1032
1033 static int
1034 rump_vop_readlink(void *v)
1035 {
1036 struct vop_readlink_args /* {
1037 struct vnode *a_vp;
1038 struct uio *a_uio;
1039 kauth_cred_t a_cred;
1040 }; */ *ap = v;
1041 struct vnode *vp = ap->a_vp;
1042 struct rumpfs_node *rn = vp->v_data;
1043 struct uio *uio = ap->a_uio;
1044
1045 return uiomove(rn->rn_linktarg, rn->rn_linklen, uio);
1046 }
1047
/*
 * Whiteout support: create inserts a directory entry whose node
 * pointer is the RUMPFS_WHITEOUT sentinel; delete removes it.
 */
static int
rump_vop_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct rumpfs_node *rnd = dvp->v_data;
	struct componentname *cnp = ap->a_cnp;
	int flags = ap->a_flags;

	switch (flags) {
	case LOOKUP:
		/* nothing to do for lookup */
		break;
	case CREATE:
		/* entry whose node is the whiteout sentinel */
		makedir(rnd, cnp, RUMPFS_WHITEOUT);
		break;
	case DELETE:
		cnp->cn_flags &= ~DOWHITEOUT; /* cargo culting never fails ? */
		freedir(rnd, cnp);
		break;
	default:
		panic("unknown whiteout op %d", flags);
	}

	return 0;
}
1077
1078 static int
1079 rump_vop_open(void *v)
1080 {
1081 struct vop_open_args /* {
1082 struct vnode *a_vp;
1083 int a_mode;
1084 kauth_cred_t a_cred;
1085 } */ *ap = v;
1086 struct vnode *vp = ap->a_vp;
1087 struct rumpfs_node *rn = vp->v_data;
1088 int mode = ap->a_mode;
1089 int error = EINVAL;
1090
1091 if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0)
1092 return 0;
1093
1094 if (mode & FREAD) {
1095 if (rn->rn_readfd != -1)
1096 return 0;
1097 rn->rn_readfd = rumpuser_open(rn->rn_hostpath,
1098 O_RDONLY, &error);
1099 }
1100
1101 if (mode & FWRITE) {
1102 if (rn->rn_writefd != -1)
1103 return 0;
1104 rn->rn_writefd = rumpuser_open(rn->rn_hostpath,
1105 O_WRONLY, &error);
1106 }
1107
1108 return error;
1109 }
1110
/* simple readdir. event omits dotstuff and periods */
static int
rump_vop_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct rumpfs_node *rnd = vp->v_data;
	struct rumpfs_dent *rdent;
	unsigned i;
	int rv = 0;

	/*
	 * uio_offset is used as a plain entry index into the directory
	 * list, not a byte offset.
	 */
	/* seek to current entry */
	for (i = 0, rdent = LIST_FIRST(&rnd->rn_dir);
	    (i < uio->uio_offset) && rdent;
	    i++, rdent = LIST_NEXT(rdent, rd_entries))
		continue;
	if (!rdent)
		goto out;

	/* copy entries */
	for (; rdent && uio->uio_resid > 0;
	    rdent = LIST_NEXT(rdent, rd_entries), i++) {
		struct dirent dent;

		strlcpy(dent.d_name, rdent->rd_name, sizeof(dent.d_name));
		dent.d_namlen = strlen(dent.d_name);
		dent.d_reclen = _DIRENT_RECLEN(&dent, dent.d_namlen);

		if (__predict_false(RDENT_ISWHITEOUT(rdent))) {
			/* whiteouts carry a reserved inode number */
			dent.d_fileno = INO_WHITEOUT;
			dent.d_type = DT_WHT;
		} else {
			dent.d_fileno = rdent->rd_node->rn_va.va_fileid;
			dent.d_type = vtype2dt(rdent->rd_node->rn_va.va_type);
		}

		/* entry doesn't fit: undo the index bump and stop */
		if (uio->uio_resid < dent.d_reclen) {
			i--;
			break;
		}

		rv = uiomove(&dent, dent.d_reclen, uio);
		if (rv) {
			i--;
			break;
		}
	}

 out:
	/* cookies are not supported */
	if (ap->a_cookies) {
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	}
	if (rdent)
		*ap->a_eofflag = 0;
	else
		*ap->a_eofflag = 1;
	/* publish the index of the next entry to read */
	uio->uio_offset = i;

	return rv;
}
1180
1181 static int
1182 etread(struct rumpfs_node *rn, struct uio *uio)
1183 {
1184 uint8_t *buf;
1185 size_t bufsize;
1186 ssize_t n;
1187 int error = 0;
1188
1189 bufsize = uio->uio_resid;
1190 buf = kmem_alloc(bufsize, KM_SLEEP);
1191 if ((n = rumpuser_pread(rn->rn_readfd, buf, bufsize,
1192 uio->uio_offset + rn->rn_offset, &error)) == -1)
1193 goto out;
1194 KASSERT(n <= bufsize);
1195 error = uiomove(buf, n, uio);
1196
1197 out:
1198 kmem_free(buf, bufsize);
1199 return error;
1200
1201 }
1202
1203 static int
1204 rump_vop_read(void *v)
1205 {
1206 struct vop_read_args /* {
1207 struct vnode *a_vp;
1208 struct uio *a_uio;
1209 int ioflags a_ioflag;
1210 kauth_cred_t a_cred;
1211 }; */ *ap = v;
1212 struct vnode *vp = ap->a_vp;
1213 struct rumpfs_node *rn = vp->v_data;
1214 struct uio *uio = ap->a_uio;
1215 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1216 off_t chunk;
1217 int error = 0;
1218
1219 /* et op? */
1220 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1221 return etread(rn, uio);
1222
1223 /* otherwise, it's off to ubc with us */
1224 while (uio->uio_resid > 0) {
1225 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1226 if (chunk == 0)
1227 break;
1228 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1229 UBC_READ | UBC_PARTIALOK | UBC_WANT_UNMAP(vp)?UBC_UNMAP:0);
1230 if (error)
1231 break;
1232 }
1233
1234 return error;
1235 }
1236
/*
 * Write to the host file backing an etfs node.  The data is first
 * gathered from the uio into a temporary buffer, then pwritten to
 * the host fd.
 */
static int
etwrite(struct rumpfs_node *rn, struct uio *uio)
{
	uint8_t *buf;
	size_t bufsize;
	ssize_t n;
	int error = 0;

	bufsize = uio->uio_resid;
	buf = kmem_alloc(bufsize, KM_SLEEP);
	error = uiomove(buf, bufsize, uio);
	if (error)
		goto out;
	KASSERT(uio->uio_resid == 0);
	/*
	 * uiomove already advanced uio_offset past the data, so back
	 * up by bufsize to get the starting offset of this write.
	 */
	n = rumpuser_pwrite(rn->rn_writefd, buf, bufsize,
	    (uio->uio_offset-bufsize) + rn->rn_offset, &error);
	if (n >= 0) {
		KASSERT(n <= bufsize);
		/* bytes the host did not accept become residual again */
		uio->uio_resid = bufsize - n;
	}

 out:
	kmem_free(buf, bufsize);
	return error;
}
1262
1263 static int
1264 rump_vop_write(void *v)
1265 {
1266 struct vop_read_args /* {
1267 struct vnode *a_vp;
1268 struct uio *a_uio;
1269 int ioflags a_ioflag;
1270 kauth_cred_t a_cred;
1271 }; */ *ap = v;
1272 struct vnode *vp = ap->a_vp;
1273 struct rumpfs_node *rn = vp->v_data;
1274 struct uio *uio = ap->a_uio;
1275 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1276 void *olddata;
1277 size_t oldlen, newlen;
1278 off_t chunk;
1279 int error = 0;
1280 bool allocd = false;
1281
1282 if (ap->a_ioflag & IO_APPEND)
1283 uio->uio_offset = vp->v_size;
1284
1285 /* consult et? */
1286 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1287 return etwrite(rn, uio);
1288
1289 /*
1290 * Otherwise, it's a case of ubcmove.
1291 */
1292
1293 /*
1294 * First, make sure we have enough storage.
1295 *
1296 * No, you don't need to tell me it's not very efficient.
1297 * No, it doesn't really support sparse files, just fakes it.
1298 */
1299 newlen = uio->uio_offset + uio->uio_resid;
1300 oldlen = 0; /* XXXgcc */
1301 olddata = NULL;
1302 if (rn->rn_dlen < newlen) {
1303 oldlen = rn->rn_dlen;
1304 olddata = rn->rn_data;
1305
1306 rn->rn_data = rump_hypermalloc(newlen, 0, true, "rumpfs");
1307 rn->rn_dlen = newlen;
1308 memset(rn->rn_data, 0, newlen);
1309 memcpy(rn->rn_data, olddata, oldlen);
1310 allocd = true;
1311 uvm_vnp_setsize(vp, newlen);
1312 }
1313
1314 /* ok, we have enough stooorage. write */
1315 while (uio->uio_resid > 0) {
1316 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1317 if (chunk == 0)
1318 break;
1319 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1320 UBC_WRITE | UBC_PARTIALOK | UBC_WANT_UNMAP(vp)?UBC_UNMAP:0);
1321 if (error)
1322 break;
1323 }
1324
1325 if (allocd) {
1326 if (error) {
1327 rump_hyperfree(rn->rn_data, newlen);
1328 rn->rn_data = olddata;
1329 rn->rn_dlen = oldlen;
1330 uvm_vnp_setsize(vp, oldlen);
1331 } else {
1332 rump_hyperfree(olddata, oldlen);
1333 }
1334 }
1335
1336 return error;
1337 }
1338
1339 static int
1340 rump_vop_bmap(void *v)
1341 {
1342 struct vop_bmap_args /* {
1343 struct vnode *a_vp;
1344 daddr_t a_bn;
1345 struct vnode **a_vpp;
1346 daddr_t *a_bnp;
1347 int *a_runp;
1348 } */ *ap = v;
1349
1350 /* 1:1 mapping */
1351 if (ap->a_vpp)
1352 *ap->a_vpp = ap->a_vp;
1353 if (ap->a_bnp)
1354 *ap->a_bnp = ap->a_bn;
1355 if (ap->a_runp)
1356 *ap->a_runp = 16;
1357
1358 return 0;
1359 }
1360
1361 static int
1362 rump_vop_strategy(void *v)
1363 {
1364 struct vop_strategy_args /* {
1365 struct vnode *a_vp;
1366 struct buf *a_bp;
1367 } */ *ap = v;
1368 struct vnode *vp = ap->a_vp;
1369 struct rumpfs_node *rn = vp->v_data;
1370 struct buf *bp = ap->a_bp;
1371 off_t copylen, copyoff;
1372 int error;
1373
1374 if (vp->v_type != VREG || rn->rn_flags & RUMPNODE_ET_PHONE_HOST) {
1375 error = EINVAL;
1376 goto out;
1377 }
1378
1379 copyoff = bp->b_blkno << DEV_BSHIFT;
1380 copylen = MIN(rn->rn_dlen - copyoff, bp->b_bcount);
1381 if (BUF_ISWRITE(bp)) {
1382 memcpy((uint8_t *)rn->rn_data + copyoff, bp->b_data, copylen);
1383 } else {
1384 memset((uint8_t*)bp->b_data + copylen, 0, bp->b_bcount-copylen);
1385 memcpy(bp->b_data, (uint8_t *)rn->rn_data + copyoff, copylen);
1386 }
1387 bp->b_resid = 0;
1388 error = 0;
1389
1390 out:
1391 bp->b_error = error;
1392 biodone(bp);
1393 return 0;
1394 }
1395
1396 static int
1397 rump_vop_pathconf(void *v)
1398 {
1399 struct vop_pathconf_args /* {
1400 struct vnode *a_vp;
1401 int a_name;
1402 register_t *a_retval;
1403 }; */ *ap = v;
1404 int name = ap->a_name;
1405 register_t *retval = ap->a_retval;
1406
1407 switch (name) {
1408 case _PC_LINK_MAX:
1409 *retval = LINK_MAX;
1410 return 0;
1411 case _PC_NAME_MAX:
1412 *retval = NAME_MAX;
1413 return 0;
1414 case _PC_PATH_MAX:
1415 *retval = PATH_MAX;
1416 return 0;
1417 case _PC_PIPE_BUF:
1418 *retval = PIPE_BUF;
1419 return 0;
1420 case _PC_CHOWN_RESTRICTED:
1421 *retval = 1;
1422 return 0;
1423 case _PC_NO_TRUNC:
1424 *retval = 1;
1425 return 0;
1426 case _PC_SYNC_IO:
1427 *retval = 1;
1428 return 0;
1429 case _PC_FILESIZEBITS:
1430 *retval = 43; /* this one goes to 11 */
1431 return 0;
1432 case _PC_SYMLINK_MAX:
1433 *retval = MAXPATHLEN;
1434 return 0;
1435 case _PC_2_SYMLINKS:
1436 *retval = 1;
1437 return 0;
1438 default:
1439 return EINVAL;
1440 }
1441 }
1442
/*
 * Generic "always succeeds" vop, used as a stand-in for operations
 * that need no work in rumpfs.
 */
static int
rump_vop_success(void *v)
{

	return 0;
}
1449
1450 static int
1451 rump_vop_inactive(void *v)
1452 {
1453 struct vop_inactive_args /* {
1454 struct vnode *a_vp;
1455 bool *a_recycle;
1456 } */ *ap = v;
1457 struct vnode *vp = ap->a_vp;
1458 struct rumpfs_node *rn = vp->v_data;
1459 int error;
1460
1461 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST && vp->v_type == VREG) {
1462 if (rn->rn_readfd != -1) {
1463 rumpuser_close(rn->rn_readfd, &error);
1464 rn->rn_readfd = -1;
1465 }
1466 if (rn->rn_writefd != -1) {
1467 rumpuser_close(rn->rn_writefd, &error);
1468 rn->rn_writefd = -1;
1469 }
1470 }
1471 *ap->a_recycle = (rn->rn_flags & RUMPNODE_CANRECLAIM) ? true : false;
1472
1473 VOP_UNLOCK(vp);
1474 return 0;
1475 }
1476
/*
 * reclaim: detach the rumpfs node from the vnode.  The node itself is
 * freed only when flagged RUMPNODE_CANRECLAIM; otherwise it stays
 * allocated (presumably still referenced via the etfs bookkeeping —
 * TODO confirm against the etfs list handling elsewhere in this file).
 */
static int
rump_vop_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct rumpfs_node *rn = vp->v_data;

	/* break the node -> vnode link under reclock */
	mutex_enter(&reclock);
	rn->rn_vp = NULL;
	mutex_exit(&reclock);
	genfs_node_destroy(vp);
	vp->v_data = NULL;

	if (rn->rn_flags & RUMPNODE_CANRECLAIM) {
		/* symlink targets come from the pathname buffer pool */
		if (vp->v_type == VLNK)
			PNBUF_PUT(rn->rn_linktarg);
		if (rn->rn_hostpath)
			free(rn->rn_hostpath, M_TEMP);
		kmem_free(rn, sizeof(*rn));
	}

	return 0;
}
1502
1503 static int
1504 rump_vop_spec(void *v)
1505 {
1506 struct vop_generic_args *ap = v;
1507 int (**opvec)(void *);
1508
1509 switch (ap->a_desc->vdesc_offset) {
1510 case VOP_ACCESS_DESCOFFSET:
1511 case VOP_GETATTR_DESCOFFSET:
1512 case VOP_SETATTR_DESCOFFSET:
1513 case VOP_LOCK_DESCOFFSET:
1514 case VOP_UNLOCK_DESCOFFSET:
1515 case VOP_ISLOCKED_DESCOFFSET:
1516 case VOP_RECLAIM_DESCOFFSET:
1517 opvec = rump_vnodeop_p;
1518 break;
1519 default:
1520 opvec = spec_vnodeop_p;
1521 break;
1522 }
1523
1524 return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
1525 }
1526
1527 static int
1528 rump_vop_advlock(void *v)
1529 {
1530 struct vop_advlock_args /* {
1531 const struct vnodeop_desc *a_desc;
1532 struct vnode *a_vp;
1533 void *a_id;
1534 int a_op;
1535 struct flock *a_fl;
1536 int a_flags;
1537 } */ *ap = v;
1538 struct vnode *vp = ap->a_vp;
1539 struct rumpfs_node *rn = vp->v_data;
1540
1541 return lf_advlock(ap, &rn->rn_lockf, vp->v_size);
1542 }
1543
1544 /*
1545 * Begin vfs-level stuff
1546 */
1547
VFS_PROTOS(rumpfs);
/*
 * VFS operations vector for rumpfs.  Unsupported operations are wired
 * to eopnotsupp; operations that need no work are wired to nullop.
 */
struct vfsops rumpfs_vfsops = {
	.vfs_name =		MOUNT_RUMPFS,
	.vfs_min_mount_data = 	0,
	.vfs_mount =		rumpfs_mount,
	.vfs_start =		(void *)nullop,
	.vfs_unmount =		rumpfs_unmount,
	.vfs_root =		rumpfs_root,
	.vfs_quotactl =		(void *)eopnotsupp,
	.vfs_statvfs =		genfs_statvfs,
	.vfs_sync =		(void *)nullop,
	.vfs_vget =		rumpfs_vget,
	.vfs_fhtovp =		(void *)eopnotsupp,
	.vfs_vptofh =		(void *)eopnotsupp,
	.vfs_init =		rumpfs_init,
	.vfs_reinit =		NULL,
	.vfs_done =		rumpfs_done,
	.vfs_mountroot =	rumpfs_mountroot,
	.vfs_snapshot =		(void *)eopnotsupp,
	.vfs_extattrctl =	(void *)eopnotsupp,
	.vfs_suspendctl =	(void *)eopnotsupp,
	.vfs_renamelock_enter =	genfs_renamelock_enter,
	.vfs_renamelock_exit =	genfs_renamelock_exit,
	.vfs_opv_descs =	rump_opv_descs,
	/* vfs_refcount */
	/* vfs_list */
};
1575
1576 static int
1577 rumpfs_mountfs(struct mount *mp)
1578 {
1579 struct rumpfs_mount *rfsmp;
1580 struct rumpfs_node *rn;
1581 int error;
1582
1583 rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);
1584
1585 rn = makeprivate(VDIR, NODEV, DEV_BSIZE, false);
1586 rn->rn_parent = rn;
1587 if ((error = makevnode(mp, rn, &rfsmp->rfsmp_rvp)) != 0)
1588 return error;
1589
1590 rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;
1591 VOP_UNLOCK(rfsmp->rfsmp_rvp);
1592
1593 mp->mnt_data = rfsmp;
1594 mp->mnt_stat.f_namemax = MAXNAMLEN;
1595 mp->mnt_stat.f_iosize = 512;
1596 mp->mnt_flag |= MNT_LOCAL;
1597 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
1598 mp->mnt_fs_bshift = DEV_BSHIFT;
1599 vfs_getnewfsid(mp);
1600
1601 return 0;
1602 }
1603
1604 int
1605 rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
1606 {
1607 int error;
1608
1609 if (mp->mnt_flag & MNT_UPDATE) {
1610 return 0;
1611 }
1612
1613 error = set_statvfs_info(mntpath, UIO_USERSPACE, "rumpfs", UIO_SYSSPACE,
1614 mp->mnt_op->vfs_name, mp, curlwp);
1615 if (error)
1616 return error;
1617
1618 return rumpfs_mountfs(mp);
1619 }
1620
1621 int
1622 rumpfs_unmount(struct mount *mp, int mntflags)
1623 {
1624 struct rumpfs_mount *rfsmp = mp->mnt_data;
1625 int flags = 0, error;
1626
1627 if (panicstr || mntflags & MNT_FORCE)
1628 flags |= FORCECLOSE;
1629
1630 if ((error = vflush(mp, rfsmp->rfsmp_rvp, flags)) != 0)
1631 return error;
1632 vgone(rfsmp->rfsmp_rvp); /* XXX */
1633
1634 kmem_free(rfsmp, sizeof(*rfsmp));
1635
1636 return 0;
1637 }
1638
1639 int
1640 rumpfs_root(struct mount *mp, struct vnode **vpp)
1641 {
1642 struct rumpfs_mount *rfsmp = mp->mnt_data;
1643
1644 vref(rfsmp->rfsmp_rvp);
1645 vn_lock(rfsmp->rfsmp_rvp, LK_EXCLUSIVE | LK_RETRY);
1646 *vpp = rfsmp->rfsmp_rvp;
1647 return 0;
1648 }
1649
/*
 * vget by inode number is not supported: rumpfs has no persistent
 * inode numbering to look nodes up by.
 */
int
rumpfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return EOPNOTSUPP;
}
1656
1657 void
1658 rumpfs_init()
1659 {
1660
1661 CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);
1662
1663 mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
1664 mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);
1665 }
1666
1667 void
1668 rumpfs_done()
1669 {
1670
1671 mutex_destroy(&reclock);
1672 mutex_destroy(&etfs_lock);
1673 }
1674
1675 int
1676 rumpfs_mountroot()
1677 {
1678 struct mount *mp;
1679 int error;
1680
1681 if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
1682 vrele(rootvp);
1683 return error;
1684 }
1685
1686 if ((error = rumpfs_mountfs(mp)) != 0)
1687 panic("mounting rootfs failed: %d", error);
1688
1689 mutex_enter(&mountlist_lock);
1690 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
1691 mutex_exit(&mountlist_lock);
1692
1693 error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
1694 mp->mnt_op->vfs_name, mp, curlwp);
1695 if (error)
1696 panic("set_statvfs_info failed for rootfs: %d", error);
1697
1698 mp->mnt_flag &= ~MNT_RDONLY;
1699 vfs_unbusy(mp, false, NULL);
1700
1701 return 0;
1702 }
1703