      1 /*	$NetBSD: rumpfs.c,v 1.164 2021/06/29 22:38:10 dholland Exp $	*/
2
3 /*
4 * Copyright (c) 2009, 2010, 2011 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpfs.c,v 1.164 2021/06/29 22:38:10 dholland Exp $");
30
31 #include <sys/param.h>
32 #include <sys/atomic.h>
33 #include <sys/buf.h>
34 #include <sys/dirent.h>
35 #include <sys/errno.h>
36 #include <sys/filedesc.h>
37 #include <sys/fcntl.h>
38 #include <sys/kauth.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/lock.h>
44 #include <sys/lockf.h>
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 #include <sys/syscallargs.h>
48 #include <sys/vnode.h>
49 #include <sys/fstrans.h>
50 #include <sys/unistd.h>
51
52 #include <miscfs/specfs/specdev.h>
53 #include <miscfs/genfs/genfs.h>
54 #include <miscfs/genfs/genfs_node.h>
55
56 #include <uvm/uvm_extern.h>
57
58 #include <rump-sys/kern.h>
59 #include <rump-sys/vfs.h>
60
61 #include <rump/rumpfs.h>
62 #include <rump/rumpuser.h>
63
64 static int rump_vop_parsepath(void *);
65 static int rump_vop_lookup(void *);
66 static int rump_vop_getattr(void *);
67 static int rump_vop_setattr(void *);
68 static int rump_vop_mkdir(void *);
69 static int rump_vop_rmdir(void *);
70 static int rump_vop_remove(void *);
71 static int rump_vop_mknod(void *);
72 static int rump_vop_create(void *);
73 static int rump_vop_inactive(void *);
74 static int rump_vop_reclaim(void *);
75 static int rump_vop_success(void *);
76 static int rump_vop_readdir(void *);
77 static int rump_vop_spec(void *);
78 static int rump_vop_read(void *);
79 static int rump_vop_write(void *);
80 static int rump_vop_open(void *);
81 static int rump_vop_symlink(void *);
82 static int rump_vop_readlink(void *);
83 static int rump_vop_whiteout(void *);
84 static int rump_vop_pathconf(void *);
85 static int rump_vop_bmap(void *);
86 static int rump_vop_strategy(void *);
87 static int rump_vop_advlock(void *);
88 static int rump_vop_access(void *);
89 static int rump_vop_fcntl(void *);
90
91 int (**rump_vnodeop_p)(void *);
92 const struct vnodeopv_entry_desc rump_vnodeop_entries[] = {
93 { &vop_default_desc, vn_default_error },
94 { &vop_parsepath_desc, rump_vop_parsepath },
95 { &vop_lookup_desc, rump_vop_lookup },
96 { &vop_getattr_desc, rump_vop_getattr },
97 { &vop_setattr_desc, rump_vop_setattr },
98 { &vop_mkdir_desc, rump_vop_mkdir },
99 { &vop_rmdir_desc, rump_vop_rmdir },
100 { &vop_remove_desc, rump_vop_remove },
101 { &vop_mknod_desc, rump_vop_mknod },
102 { &vop_create_desc, rump_vop_create },
103 { &vop_symlink_desc, rump_vop_symlink },
104 { &vop_readlink_desc, rump_vop_readlink },
105 { &vop_access_desc, rump_vop_access },
106 { &vop_accessx_desc, genfs_accessx },
107 { &vop_readdir_desc, rump_vop_readdir },
108 { &vop_read_desc, rump_vop_read },
109 { &vop_write_desc, rump_vop_write },
110 { &vop_open_desc, rump_vop_open },
111 { &vop_close_desc, genfs_nullop },
112 { &vop_seek_desc, genfs_seek },
113 { &vop_getpages_desc, genfs_getpages },
114 { &vop_putpages_desc, genfs_putpages },
115 { &vop_whiteout_desc, rump_vop_whiteout },
116 { &vop_fsync_desc, rump_vop_success },
117 { &vop_lock_desc, genfs_lock },
118 { &vop_unlock_desc, genfs_unlock },
119 { &vop_islocked_desc, genfs_islocked },
120 { &vop_inactive_desc, rump_vop_inactive },
121 { &vop_reclaim_desc, rump_vop_reclaim },
122 { &vop_link_desc, genfs_eopnotsupp },
123 { &vop_pathconf_desc, rump_vop_pathconf },
124 { &vop_bmap_desc, rump_vop_bmap },
125 { &vop_strategy_desc, rump_vop_strategy },
126 { &vop_advlock_desc, rump_vop_advlock },
127 { &vop_fcntl_desc, rump_vop_fcntl },
128 { NULL, NULL }
129 };
130 const struct vnodeopv_desc rump_vnodeop_opv_desc =
131 { &rump_vnodeop_p, rump_vnodeop_entries };
132
133 int (**rump_specop_p)(void *);
134 const struct vnodeopv_entry_desc rump_specop_entries[] = {
135 { &vop_default_desc, rump_vop_spec },
136 { NULL, NULL }
137 };
138 const struct vnodeopv_desc rump_specop_opv_desc =
139 { &rump_specop_p, rump_specop_entries };
140
141 const struct vnodeopv_desc * const rump_opv_descs[] = {
142 &rump_vnodeop_opv_desc,
143 &rump_specop_opv_desc,
144 NULL
145 };
146
147 #define RUMPFS_WHITEOUT ((void *)-1)
148 #define RDENT_ISWHITEOUT(rdp) (rdp->rd_node == RUMPFS_WHITEOUT)
149 struct rumpfs_dent {
150 char *rd_name;
151 int rd_namelen;
152 struct rumpfs_node *rd_node;
153
154 LIST_ENTRY(rumpfs_dent) rd_entries;
155 };
156
157 struct genfs_ops rumpfs_genfsops = {
158 .gop_size = genfs_size,
159 .gop_write = genfs_gop_write,
160 .gop_putrange = genfs_gop_putrange,
161
162 /* optional */
163 .gop_alloc = NULL,
164 .gop_markupdate = NULL,
165 };
166
167 struct rumpfs_node {
168 struct genfs_node rn_gn;
169 struct vattr rn_va;
170 struct vnode *rn_vp;
171 char *rn_hostpath;
172 int rn_flags;
173 struct lockf *rn_lockf;
174
175 union {
176 struct { /* VREG */
177 int readfd;
178 int writefd;
179 uint64_t offset;
180 } reg;
181 struct {
182 void *data;
183 size_t dlen;
184 } reg_noet;
185 struct { /* VDIR */
186 LIST_HEAD(, rumpfs_dent) dents;
187 struct rumpfs_node *parent;
188 int flags;
189 } dir;
190 struct {
191 char *target;
192 size_t len;
193 } link;
194 } rn_u;
195 };
196 #define rn_readfd rn_u.reg.readfd
197 #define rn_writefd rn_u.reg.writefd
198 #define rn_offset rn_u.reg.offset
199 #define rn_data rn_u.reg_noet.data
200 #define rn_dlen rn_u.reg_noet.dlen
201 #define rn_dir rn_u.dir.dents
202 #define rn_parent rn_u.dir.parent
203 #define rn_linktarg rn_u.link.target
204 #define rn_linklen rn_u.link.len
205
206 #define RUMPNODE_CANRECLAIM 0x01
207 #define RUMPNODE_DIR_ET 0x02
208 #define RUMPNODE_DIR_ETSUBS 0x04
209 #define RUMPNODE_ET_PHONE_HOST 0x10
210 #define RUMPNODE_EXTSTORAGE 0x20
211
212 struct rumpfs_mount {
213 struct vnode *rfsmp_rvp;
214 };
215
216 #define INO_WHITEOUT 1
    217 static unsigned lastino = 2;
218 static kmutex_t reclock;
219
220 #define RUMPFS_DEFAULTMODE 0755
221 static void freedir(struct rumpfs_node *, struct componentname *);
222 static struct rumpfs_node *makeprivate(enum vtype, mode_t, dev_t, off_t, bool);
223 static void freeprivate(struct rumpfs_node *);
224
225 /*
226 * Extra Terrestrial stuff. We map a given key (pathname) to a file on
227 * the host FS. ET phones home only from the root node of rumpfs.
228 *
229 * When an etfs node is removed, a vnode potentially behind it is not
230 * immediately recycled.
231 */
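
/*
 * Usage sketch (illustrative only; the wrapper names below are the
 * public rump API that ends up calling etfsregister()/etfsremove()
 * through the hooks installed in rumpfs_init()):
 *
 *	rump_init();
 *	error = rump_pub_etfs_register("/dev/image", "./disk.img",
 *	    RUMP_ETFS_BLK);
 *	... access /dev/image through the rump kernel ...
 *	error = rump_pub_etfs_remove("/dev/image");
 */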
232
233 struct etfs {
234 char et_key[MAXPATHLEN];
235 size_t et_keylen;
236 bool et_prefixkey;
237 bool et_removing;
238 devminor_t et_blkmin;
239
240 LIST_ENTRY(etfs) et_entries;
241
242 struct rumpfs_node *et_rn;
243 };
244 static kmutex_t etfs_lock;
245 static LIST_HEAD(, etfs) etfs_list = LIST_HEAD_INITIALIZER(etfs_list);
246
247 static enum vtype
248 ettype_to_vtype(enum rump_etfs_type et)
249 {
250 enum vtype vt;
251
252 switch (et) {
253 case RUMP_ETFS_REG:
254 vt = VREG;
255 break;
256 case RUMP_ETFS_BLK:
257 vt = VBLK;
258 break;
259 case RUMP_ETFS_CHR:
260 vt = VCHR;
261 break;
262 case RUMP_ETFS_DIR:
263 vt = VDIR;
264 break;
265 case RUMP_ETFS_DIR_SUBDIRS:
266 vt = VDIR;
267 break;
268 default:
269 panic("invalid et type: %d", et);
270 }
271
272 return vt;
273 }
274
275 static enum vtype
276 hft_to_vtype(int hft)
277 {
278 enum vtype vt;
279
280 switch (hft) {
281 case RUMPUSER_FT_OTHER:
282 vt = VNON;
283 break;
284 case RUMPUSER_FT_DIR:
285 vt = VDIR;
286 break;
287 case RUMPUSER_FT_REG:
288 vt = VREG;
289 break;
290 case RUMPUSER_FT_BLK:
291 vt = VBLK;
292 break;
293 case RUMPUSER_FT_CHR:
294 vt = VCHR;
295 break;
296 default:
297 vt = VNON;
298 break;
299 }
300
301 return vt;
302 }
303
304 static bool
305 etfs_find(const char *key, struct etfs **etp, bool forceprefix)
306 {
307 struct etfs *et;
308 size_t keylen = strlen(key);
309
310 KASSERT(mutex_owned(&etfs_lock));
311
312 LIST_FOREACH(et, &etfs_list, et_entries) {
313 if ((keylen == et->et_keylen || et->et_prefixkey || forceprefix)
314 && strncmp(key, et->et_key, et->et_keylen) == 0) {
315 if (etp)
316 *etp = et;
317 return true;
318 }
319 }
320
321 return false;
322 }
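
/*
 * Note on matching: for entries with et_prefixkey set (the directory
 * etfs types, see etfsregister() below) only the first et_keylen bytes
 * are compared, so an entry keyed "dev/vfs" also matches a lookup of
 * "dev/vfs/foo".  All other entry types require an exact-length match.
 */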
323
324 #define REGDIR(ftype) \
325 ((ftype) == RUMP_ETFS_DIR || (ftype) == RUMP_ETFS_DIR_SUBDIRS)
326 static int
327 etfsregister(const char *key, const char *hostpath,
328 enum rump_etfs_type ftype, uint64_t begin, uint64_t size)
329 {
330 char buf[9];
331 struct etfs *et;
332 struct rumpfs_node *rn;
333 uint64_t fsize;
334 dev_t rdev = NODEV;
335 devminor_t dmin = -1;
336 int hft, error;
337
338 if (key[0] != '/') {
339 return EINVAL;
340 }
341 while (key[0] == '/') {
342 key++;
343 }
344
345 if ((error = rumpuser_getfileinfo(hostpath, &fsize, &hft)) != 0)
346 return error;
347
348 /* etfs directory requires a directory on the host */
349 if (REGDIR(ftype)) {
350 if (hft != RUMPUSER_FT_DIR)
351 return ENOTDIR;
352 if (begin != 0)
353 return EISDIR;
354 if (size != RUMP_ETFS_SIZE_ENDOFF)
355 return EISDIR;
356 size = fsize;
357 } else {
358 if (begin > fsize)
359 return EINVAL;
360 if (size == RUMP_ETFS_SIZE_ENDOFF)
361 size = fsize - begin;
362 if (begin + size > fsize)
363 return EINVAL;
364 }
365
366 if (ftype == RUMP_ETFS_BLK || ftype == RUMP_ETFS_CHR) {
367 error = rumpblk_register(hostpath, &dmin, begin, size);
368 if (error != 0) {
369 return error;
370 }
371 rdev = makedev(RUMPBLK_DEVMAJOR, dmin);
372 }
373
374 et = kmem_alloc(sizeof(*et), KM_SLEEP);
375 strcpy(et->et_key, key);
376 et->et_keylen = strlen(et->et_key);
377 et->et_rn = rn = makeprivate(ettype_to_vtype(ftype), RUMPFS_DEFAULTMODE,
378 rdev, size, true);
379 et->et_removing = false;
380 et->et_blkmin = dmin;
381
382 rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
383
384 if (ftype == RUMP_ETFS_REG || REGDIR(ftype) || et->et_blkmin != -1) {
385 size_t len = strlen(hostpath)+1;
386
387 rn->rn_hostpath = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
388 memcpy(rn->rn_hostpath, hostpath, len);
389 rn->rn_offset = begin;
390 }
391
392 if (REGDIR(ftype)) {
393 rn->rn_flags |= RUMPNODE_DIR_ET;
394 et->et_prefixkey = true;
395 } else {
396 et->et_prefixkey = false;
397 }
398
399 if (ftype == RUMP_ETFS_DIR_SUBDIRS)
400 rn->rn_flags |= RUMPNODE_DIR_ETSUBS;
401
402 mutex_enter(&etfs_lock);
403 if (etfs_find(key, NULL, REGDIR(ftype))) {
404 mutex_exit(&etfs_lock);
405 if (et->et_blkmin != -1)
406 rumpblk_deregister(hostpath);
407 if (et->et_rn->rn_hostpath != NULL)
408 free(et->et_rn->rn_hostpath, M_TEMP);
409 freeprivate(et->et_rn);
410 kmem_free(et, sizeof(*et));
411 return EEXIST;
412 }
413 LIST_INSERT_HEAD(&etfs_list, et, et_entries);
414 mutex_exit(&etfs_lock);
415
416 if (ftype == RUMP_ETFS_BLK) {
417 format_bytes(buf, sizeof(buf), size);
418 aprint_verbose("/%s: hostpath %s (%s)\n", key, hostpath, buf);
419 }
420
421 return 0;
422 }
423 #undef REGDIR
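
/*
 * For non-directory registrations, begin/size select a byte window of
 * the host file; RUMP_ETFS_SIZE_ENDOFF means "to end of file".  For
 * example (wrapper name assumed from the public rump API), mapping only
 * the second megabyte of a host image as a block device:
 *
 *	rump_pub_etfs_register_withsize("/dev/part1", "./disk.img",
 *	    RUMP_ETFS_BLK, 1024*1024, 1024*1024);
 */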
424
425 /* remove etfs mapping. caller's responsibility to make sure it's not in use */
426 static int
427 etfsremove(const char *key)
428 {
429 struct etfs *et;
430 size_t keylen;
431 int rv __diagused;
432
433 if (key[0] != '/') {
434 return EINVAL;
435 }
436 while (key[0] == '/') {
437 key++;
438 }
439
440 keylen = strlen(key);
441
442 mutex_enter(&etfs_lock);
443 LIST_FOREACH(et, &etfs_list, et_entries) {
444 if (keylen == et->et_keylen && strcmp(et->et_key, key) == 0) {
445 if (et->et_removing)
446 et = NULL;
447 else
448 et->et_removing = true;
449 break;
450 }
451 }
452 mutex_exit(&etfs_lock);
453 if (!et)
454 return ENOENT;
455
456 /*
457 * ok, we know what we want to remove and have signalled there
458 * actually are men at work. first, unregister from rumpblk
459 */
460 if (et->et_blkmin != -1) {
461 rv = rumpblk_deregister(et->et_rn->rn_hostpath);
462 } else {
463 rv = 0;
464 }
465 KASSERT(rv == 0);
466
467 /* then do the actual removal */
468 mutex_enter(&etfs_lock);
469 LIST_REMOVE(et, et_entries);
470 mutex_exit(&etfs_lock);
471
472 /* node is unreachable, safe to nuke all device copies */
473 if (et->et_blkmin != -1) {
474 vdevgone(RUMPBLK_DEVMAJOR, et->et_blkmin, et->et_blkmin, VBLK);
475 } else {
476 struct vnode *vp;
477 struct mount *mp;
478 struct rumpfs_node *rn;
479
480 mutex_enter(&reclock);
481 if ((vp = et->et_rn->rn_vp) != NULL) {
482 mp = vp->v_mount;
483 rn = vp->v_data;
484 KASSERT(rn == et->et_rn);
485 } else {
486 mp = NULL;
487 }
488 mutex_exit(&reclock);
489 if (mp && vcache_get(mp, &rn, sizeof(rn), &vp) == 0) {
490 rv = vfs_suspend(mp, 0);
491 KASSERT(rv == 0);
492 vgone(vp);
493 vfs_resume(mp);
494 }
495 }
496
497 if (et->et_rn->rn_hostpath != NULL)
498 free(et->et_rn->rn_hostpath, M_TEMP);
499 freeprivate(et->et_rn);
500 kmem_free(et, sizeof(*et));
501
502 return 0;
503 }
504
505 /*
506 * rumpfs
507 */
508
509 static struct rumpfs_node *
510 makeprivate(enum vtype vt, mode_t mode, dev_t rdev, off_t size, bool et)
511 {
512 struct rumpfs_node *rn;
513 struct vattr *va;
514 struct timespec ts;
515
516 KASSERT((mode & ~ALLPERMS) == 0);
517 rn = kmem_zalloc(sizeof(*rn), KM_SLEEP);
518
519 switch (vt) {
520 case VDIR:
521 LIST_INIT(&rn->rn_dir);
522 break;
523 case VREG:
524 if (et) {
525 rn->rn_readfd = -1;
526 rn->rn_writefd = -1;
527 }
528 break;
529 default:
530 break;
531 }
532
533 nanotime(&ts);
534
535 va = &rn->rn_va;
536 va->va_type = vt;
537 va->va_mode = mode;
538 if (vt == VDIR)
539 va->va_nlink = 2;
540 else
541 va->va_nlink = 1;
542 va->va_uid = 0;
543 va->va_gid = 0;
544 va->va_fsid =
545 va->va_fileid = atomic_inc_uint_nv(&lastino);
546 va->va_size = size;
547 va->va_blocksize = 512;
548 va->va_atime = ts;
549 va->va_mtime = ts;
550 va->va_ctime = ts;
551 va->va_birthtime = ts;
552 va->va_gen = 0;
553 va->va_flags = 0;
554 va->va_rdev = rdev;
555 va->va_bytes = 512;
556 va->va_filerev = 0;
557 va->va_vaflags = 0;
558
559 return rn;
560 }
561
562 static void
563 freeprivate(struct rumpfs_node *rn)
564 {
565
566 kmem_free(rn, sizeof(*rn));
567 }
568
569 static void
570 makedir(struct rumpfs_node *rnd,
571 struct componentname *cnp, struct rumpfs_node *rn)
572 {
573 struct rumpfs_dent *rdent;
574
575 rdent = kmem_alloc(sizeof(*rdent), KM_SLEEP);
576 rdent->rd_name = kmem_alloc(cnp->cn_namelen+1, KM_SLEEP);
577 rdent->rd_node = rn;
578 strlcpy(rdent->rd_name, cnp->cn_nameptr, cnp->cn_namelen+1);
579 rdent->rd_namelen = strlen(rdent->rd_name);
580
581 if ((cnp->cn_flags & ISWHITEOUT) != 0) {
582 KASSERT((cnp->cn_flags & DOWHITEOUT) == 0);
583 freedir(rnd, cnp);
584 }
585 LIST_INSERT_HEAD(&rnd->rn_dir, rdent, rd_entries);
586 }
587
588 static void
589 freedir(struct rumpfs_node *rnd, struct componentname *cnp)
590 {
591 struct rumpfs_dent *rd = NULL;
592
593 LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
594 if (rd->rd_namelen == cnp->cn_namelen &&
595 strncmp(rd->rd_name, cnp->cn_nameptr,
596 cnp->cn_namelen) == 0)
597 break;
598 }
599 if (rd == NULL)
600 panic("could not find directory entry: %s", cnp->cn_nameptr);
601
602 if (cnp->cn_flags & DOWHITEOUT) {
603 rd->rd_node = RUMPFS_WHITEOUT;
604 } else {
605 LIST_REMOVE(rd, rd_entries);
606 kmem_free(rd->rd_name, rd->rd_namelen+1);
607 kmem_free(rd, sizeof(*rd));
608 }
609 }
610
611 #define RUMPFS_ACCESS 1
612 #define RUMPFS_MODIFY 2
613 #define RUMPFS_CHANGE 4
614
615 static int
616 rumpfs_update(int flags, struct vnode *vp, const struct timespec *acc,
617 const struct timespec *mod, const struct timespec *chg)
618 {
619 struct rumpfs_node *rn = vp->v_data;
620
621 if (flags == 0)
622 return 0;
623
624 if (vp->v_mount->mnt_flag & MNT_RDONLY)
625 return EROFS;
626
627 if (flags & RUMPFS_ACCESS)
628 rn->rn_va.va_atime = *acc;
629 if (flags & RUMPFS_MODIFY)
630 rn->rn_va.va_mtime = *mod;
631 if (flags & RUMPFS_CHANGE)
632 rn->rn_va.va_ctime = *chg;
633
634 return 0;
635 }
636
637 /*
638 * parsepath for rump file systems - check for etfs entries.
639 */
640 static int
641 rump_vop_parsepath(void *v)
642 {
643 struct vop_parsepath_args /* {
644 struct vnode *a_dvp;
645 const char *a_name;
646 size_t *a_retval;
647 }; */ *ap = v;
648 struct etfs *et;
649 bool found;
650
651 /* check for etfs */
652 if (ap->a_dvp == rootvnode) {
653 mutex_enter(&etfs_lock);
654 found = etfs_find(ap->a_name, &et, false);
655 mutex_exit(&etfs_lock);
656 if (found) {
657 *ap->a_retval = et->et_keylen;
658 return 0;
659 }
660 }
661 return genfs_parsepath(v);
662 }
663
664 /*
665 * Simple lookup for rump file systems.
666 *
667 * uhm, this is twisted. C F C C, hope of C C F C looming
668 */
669 static int
670 rump_vop_lookup(void *v)
671 {
672 struct vop_lookup_v2_args /* {
673 struct vnode *a_dvp;
674 struct vnode **a_vpp;
675 struct componentname *a_cnp;
676 }; */ *ap = v;
677 struct componentname *cnp = ap->a_cnp;
678 struct vnode *dvp = ap->a_dvp;
679 struct vnode **vpp = ap->a_vpp;
680 struct rumpfs_node *rnd = dvp->v_data, *rn;
681 struct rumpfs_dent *rd = NULL;
682 struct etfs *et;
683 bool dotdot = (cnp->cn_flags & ISDOTDOT) != 0;
684 int rv = 0;
685
686 *vpp = NULL;
687
688 rv = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
689 if (rv)
690 return rv;
691
692 if ((cnp->cn_flags & ISLASTCN)
693 && (dvp->v_mount->mnt_flag & MNT_RDONLY)
694 && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
695 return EROFS;
696
    697 	/* check for dot, return directly if that is the case */
698 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
699 vref(dvp);
700 *vpp = dvp;
701 return 0;
702 }
703
704 /* we don't do rename */
705 if (!(((cnp->cn_flags & ISLASTCN) == 0) || (cnp->cn_nameiop != RENAME)))
706 return EOPNOTSUPP;
707
708 /* check for etfs */
709 if (dvp == rootvnode &&
710 (cnp->cn_nameiop == LOOKUP || cnp->cn_nameiop == CREATE)) {
711 bool found;
712 mutex_enter(&etfs_lock);
713 found = etfs_find(cnp->cn_nameptr, &et, false);
714 mutex_exit(&etfs_lock);
715
716 if (found) {
717 if (et->et_keylen != cnp->cn_namelen) {
718 /*
719 * This can theoretically happen if an
720 * etfs entry is added or removed
721 * while lookups are being done as we
722 * don't hold etfs_lock across here
723 * and parsepath. Won't ordinarily be
724 * the case. No biggie, just retry.
725 */
726 return ERESTART;
727 }
728 rn = et->et_rn;
729 goto getvnode;
730 }
731 }
732
733 if (rnd->rn_flags & RUMPNODE_DIR_ET) {
734 uint64_t fsize;
735 char *newpath;
736 size_t newpathlen;
737 int hft, error;
738
739 if (dotdot)
740 return EOPNOTSUPP;
741
742 newpathlen = strlen(rnd->rn_hostpath) + 1 + cnp->cn_namelen + 1;
743 newpath = malloc(newpathlen, M_TEMP, M_WAITOK);
744
745 strlcpy(newpath, rnd->rn_hostpath, newpathlen);
746 strlcat(newpath, "/", newpathlen);
747 strlcat(newpath, cnp->cn_nameptr, newpathlen);
748
749 if ((error = rumpuser_getfileinfo(newpath, &fsize, &hft)) != 0){
750 free(newpath, M_TEMP);
751 return error;
752 }
753
754 /* allow only dirs and regular files */
755 if (hft != RUMPUSER_FT_REG && hft != RUMPUSER_FT_DIR) {
756 free(newpath, M_TEMP);
757 return ENOENT;
758 }
759
760 rn = makeprivate(hft_to_vtype(hft), RUMPFS_DEFAULTMODE,
761 NODEV, fsize, true);
762 rn->rn_flags |= RUMPNODE_CANRECLAIM;
763 if (rnd->rn_flags & RUMPNODE_DIR_ETSUBS) {
764 rn->rn_flags |= RUMPNODE_DIR_ET | RUMPNODE_DIR_ETSUBS;
765 rn->rn_flags |= RUMPNODE_ET_PHONE_HOST;
766 }
767 rn->rn_hostpath = newpath;
768
769 goto getvnode;
770 } else {
771 if (dotdot) {
772 if ((rn = rnd->rn_parent) != NULL)
773 goto getvnode;
774 } else {
775 LIST_FOREACH(rd, &rnd->rn_dir, rd_entries) {
776 if (rd->rd_namelen == cnp->cn_namelen &&
777 strncmp(rd->rd_name, cnp->cn_nameptr,
778 cnp->cn_namelen) == 0)
779 break;
780 }
781 }
782 }
783
784 if (!rd && ((cnp->cn_flags & ISLASTCN) == 0||cnp->cn_nameiop != CREATE))
785 return ENOENT;
786
787 if (!rd && (cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
788 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
789 return EROFS;
790 rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
791 if (rv)
792 return rv;
793 return EJUSTRETURN;
794 }
795
796 if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == DELETE) {
797 rv = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
798 if (rv)
799 return rv;
800 }
801
802 if (RDENT_ISWHITEOUT(rd)) {
803 cnp->cn_flags |= ISWHITEOUT;
804 if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE)
805 return EJUSTRETURN;
806 return ENOENT;
807 }
808
809 rn = rd->rd_node;
810
811 getvnode:
812 KASSERT(rn);
813 rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
814 if (rv) {
815 if (rnd->rn_flags & RUMPNODE_DIR_ET)
816 freeprivate(rn);
817 return rv;
818 }
819
820 return 0;
821 }
822
823 static int
824 rump_check_possible(struct vnode *vp, struct rumpfs_node *rnode,
825 mode_t mode)
826 {
827
828 if ((mode & VWRITE) == 0)
829 return 0;
830
831 switch (vp->v_type) {
832 case VDIR:
833 case VLNK:
834 case VREG:
835 break;
836 default:
837 /* special file is always writable. */
838 return 0;
839 }
840
841 return vp->v_mount->mnt_flag & MNT_RDONLY ? EROFS : 0;
842 }
843
844 static int
845 rump_check_permitted(struct vnode *vp, struct rumpfs_node *rnode,
846 accmode_t accmode, kauth_cred_t cred)
847 {
848 struct vattr *attr = &rnode->rn_va;
849
850 return kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(accmode,
851 vp->v_type, attr->va_mode), vp, NULL, genfs_can_access(vp, cred,
852 attr->va_uid, attr->va_gid, attr->va_mode, NULL, accmode));
853 }
854
855 int
856 rump_vop_access(void *v)
857 {
858 struct vop_access_args /* {
859 const struct vnodeop_desc *a_desc;
860 struct vnode *a_vp;
    861 		accmode_t a_accmode;
862 kauth_cred_t a_cred;
863 } */ *ap = v;
864 struct vnode *vp = ap->a_vp;
865 struct rumpfs_node *rn = vp->v_data;
866 int error;
867
868 error = rump_check_possible(vp, rn, ap->a_accmode);
869 if (error)
870 return error;
871
872 error = rump_check_permitted(vp, rn, ap->a_accmode, ap->a_cred);
873
874 return error;
875 }
876
877 static int
878 rump_vop_getattr(void *v)
879 {
880 struct vop_getattr_args /* {
881 struct vnode *a_vp;
882 struct vattr *a_vap;
883 kauth_cred_t a_cred;
884 } */ *ap = v;
885 struct vnode *vp = ap->a_vp;
886 struct rumpfs_node *rn = vp->v_data;
887 struct vattr *vap = ap->a_vap;
888
889 memcpy(vap, &rn->rn_va, sizeof(struct vattr));
890 vap->va_size = vp->v_size;
891 return 0;
892 }
893
894 static int
895 rump_vop_setattr(void *v)
896 {
897 struct vop_setattr_args /* {
898 struct vnode *a_vp;
899 struct vattr *a_vap;
900 kauth_cred_t a_cred;
901 } */ *ap = v;
902 struct vnode *vp = ap->a_vp;
903 struct vattr *vap = ap->a_vap;
904 struct rumpfs_node *rn = vp->v_data;
905 struct vattr *attr = &rn->rn_va;
906 struct timespec now;
907 kauth_cred_t cred = ap->a_cred;
908 int error;
909
910 #define CHANGED(a, t) (vap->a != (t)VNOVAL)
911 #define SETIFVAL(a,t) if (CHANGED(a, t)) rn->rn_va.a = vap->a
912 if (CHANGED(va_atime.tv_sec, time_t) ||
913 CHANGED(va_ctime.tv_sec, time_t) ||
914 CHANGED(va_mtime.tv_sec, time_t) ||
915 CHANGED(va_birthtime.tv_sec, time_t) ||
916 CHANGED(va_atime.tv_nsec, long) ||
917 CHANGED(va_ctime.tv_nsec, long) ||
918 CHANGED(va_mtime.tv_nsec, long) ||
919 CHANGED(va_birthtime.tv_nsec, long)) {
920 error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES, vp,
921 NULL, genfs_can_chtimes(vp, cred, attr->va_uid,
922 vap->va_vaflags));
923 if (error)
924 return error;
925 }
926
927 int flags = 0;
928 getnanotime(&now);
929 if (vap->va_atime.tv_sec != VNOVAL)
930 if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
931 flags |= RUMPFS_ACCESS;
932 if (vap->va_mtime.tv_sec != VNOVAL) {
933 flags |= RUMPFS_CHANGE | RUMPFS_MODIFY;
934 if (vp->v_mount->mnt_flag & MNT_RELATIME)
935 flags |= RUMPFS_ACCESS;
936 } else if (vap->va_size == 0) {
937 flags |= RUMPFS_MODIFY;
938 vap->va_mtime = now;
939 }
940 SETIFVAL(va_birthtime.tv_sec, time_t);
941 SETIFVAL(va_birthtime.tv_nsec, long);
942 flags |= RUMPFS_CHANGE;
943 error = rumpfs_update(flags, vp, &vap->va_atime, &vap->va_mtime, &now);
944 if (error)
945 return error;
946
947 if (CHANGED(va_flags, u_long)) {
948 /* XXX Can we handle system flags here...? */
949 error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_FLAGS, vp,
950 NULL, genfs_can_chflags(vp, cred, attr->va_uid, false));
951 if (error)
952 return error;
953 }
954
955 SETIFVAL(va_flags, u_long);
956 #undef SETIFVAL
957 #undef CHANGED
958
    959 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
960 uid_t uid =
961 (vap->va_uid != (uid_t)VNOVAL) ? vap->va_uid : attr->va_uid;
962 gid_t gid =
963 (vap->va_gid != (gid_t)VNOVAL) ? vap->va_gid : attr->va_gid;
964 error = kauth_authorize_vnode(cred,
965 KAUTH_VNODE_CHANGE_OWNERSHIP, vp, NULL,
966 genfs_can_chown(vp, cred, attr->va_uid, attr->va_gid, uid,
967 gid));
968 if (error)
969 return error;
970 attr->va_uid = uid;
971 attr->va_gid = gid;
972 }
973
974 if (vap->va_mode != (mode_t)VNOVAL) {
975 mode_t mode = vap->va_mode;
976 error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_SECURITY,
977 vp, NULL, genfs_can_chmod(vp, cred, attr->va_uid,
978 attr->va_gid, mode));
979 if (error)
980 return error;
981 attr->va_mode = mode;
982 }
983
984 if (vp->v_type == VREG &&
985 vap->va_size != VSIZENOTSET &&
986 vap->va_size != rn->rn_dlen &&
987 (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0) {
988 void *newdata;
989 size_t copylen, newlen;
990
991 newlen = vap->va_size;
992 newdata = rump_hypermalloc(newlen, 0, false, "rumpfs");
993 if (newdata == NULL)
994 return ENOSPC;
995
996 copylen = MIN(rn->rn_dlen, newlen);
997 if (copylen > 0)
998 memcpy(newdata, rn->rn_data, copylen);
999 memset((char *)newdata + copylen, 0, newlen - copylen);
1000
1001 if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
1002 rump_hyperfree(rn->rn_data, rn->rn_dlen);
1003 } else {
1004 rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
1005 }
1006
1007 rn->rn_data = newdata;
1008 rn->rn_dlen = newlen;
1009 uvm_vnp_setsize(vp, newlen);
1010 }
1011 return 0;
1012 }
1013
1014 static int
1015 rump_vop_mkdir(void *v)
1016 {
1017 struct vop_mkdir_v3_args /* {
1018 struct vnode *a_dvp;
1019 struct vnode **a_vpp;
1020 struct componentname *a_cnp;
1021 struct vattr *a_vap;
1022 }; */ *ap = v;
1023 struct vnode *dvp = ap->a_dvp;
1024 struct vnode **vpp = ap->a_vpp;
1025 struct componentname *cnp = ap->a_cnp;
1026 struct vattr *va = ap->a_vap;
1027 struct rumpfs_node *rnd = dvp->v_data, *rn;
1028 int rv = 0;
1029
1030 rn = makeprivate(VDIR, va->va_mode & ALLPERMS, NODEV, DEV_BSIZE, false);
1031 if ((cnp->cn_flags & ISWHITEOUT) != 0)
1032 rn->rn_va.va_flags |= UF_OPAQUE;
1033 rn->rn_parent = rnd;
1034 rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
1035 if (rv) {
1036 freeprivate(rn);
1037 return rv;
1038 }
1039
1040 makedir(rnd, cnp, rn);
1041
1042 return rv;
1043 }
1044
1045 static int
1046 rump_vop_rmdir(void *v)
1047 {
1048 struct vop_rmdir_v2_args /* {
1049 struct vnode *a_dvp;
1050 struct vnode *a_vp;
1051 struct componentname *a_cnp;
1052 }; */ *ap = v;
1053 struct vnode *dvp = ap->a_dvp;
1054 struct vnode *vp = ap->a_vp;
1055 struct componentname *cnp = ap->a_cnp;
1056 struct rumpfs_node *rnd = dvp->v_data;
1057 struct rumpfs_node *rn = vp->v_data;
1058 struct rumpfs_dent *rd;
1059 int rv = 0;
1060
1061 LIST_FOREACH(rd, &rn->rn_dir, rd_entries) {
1062 if (rd->rd_node != RUMPFS_WHITEOUT) {
1063 rv = ENOTEMPTY;
1064 goto out;
1065 }
1066 }
1067 while ((rd = LIST_FIRST(&rn->rn_dir)) != NULL) {
1068 KASSERT(rd->rd_node == RUMPFS_WHITEOUT);
1069 LIST_REMOVE(rd, rd_entries);
1070 kmem_free(rd->rd_name, rd->rd_namelen+1);
1071 kmem_free(rd, sizeof(*rd));
1072 }
1073
1074 freedir(rnd, cnp);
1075 rn->rn_flags |= RUMPNODE_CANRECLAIM;
1076 rn->rn_parent = NULL;
1077 rn->rn_va.va_nlink = 0;
1078
1079 out:
1080 vput(vp);
1081 return rv;
1082 }
1083
1084 static int
1085 rump_vop_remove(void *v)
1086 {
1087 struct vop_remove_v2_args /* {
1088 struct vnode *a_dvp;
1089 struct vnode *a_vp;
1090 struct componentname *a_cnp;
1091 }; */ *ap = v;
1092 struct vnode *dvp = ap->a_dvp;
1093 struct vnode *vp = ap->a_vp;
1094 struct componentname *cnp = ap->a_cnp;
1095 struct rumpfs_node *rnd = dvp->v_data;
1096 struct rumpfs_node *rn = vp->v_data;
1097 int rv = 0;
1098
1099 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1100 return EOPNOTSUPP;
1101
1102 freedir(rnd, cnp);
1103 rn->rn_flags |= RUMPNODE_CANRECLAIM;
1104 rn->rn_va.va_nlink = 0;
1105
1106 vput(vp);
1107 return rv;
1108 }
1109
1110 static int
1111 rump_vop_mknod(void *v)
1112 {
1113 struct vop_mknod_v3_args /* {
1114 struct vnode *a_dvp;
1115 struct vnode **a_vpp;
1116 struct componentname *a_cnp;
1117 struct vattr *a_vap;
1118 }; */ *ap = v;
1119 struct vnode *dvp = ap->a_dvp;
1120 struct vnode **vpp = ap->a_vpp;
1121 struct componentname *cnp = ap->a_cnp;
1122 struct vattr *va = ap->a_vap;
1123 struct rumpfs_node *rnd = dvp->v_data, *rn;
1124 int rv;
1125
1126 rn = makeprivate(va->va_type, va->va_mode & ALLPERMS, va->va_rdev,
1127 DEV_BSIZE, false);
1128 if ((cnp->cn_flags & ISWHITEOUT) != 0)
1129 rn->rn_va.va_flags |= UF_OPAQUE;
1130 rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
1131 if (rv) {
1132 freeprivate(rn);
1133 return rv;
1134 }
1135
1136 makedir(rnd, cnp, rn);
1137
1138 return rv;
1139 }
1140
1141 static int
1142 rump_vop_create(void *v)
1143 {
1144 struct vop_create_v3_args /* {
1145 struct vnode *a_dvp;
1146 struct vnode **a_vpp;
1147 struct componentname *a_cnp;
1148 struct vattr *a_vap;
1149 }; */ *ap = v;
1150 struct vnode *dvp = ap->a_dvp;
1151 struct vnode **vpp = ap->a_vpp;
1152 struct componentname *cnp = ap->a_cnp;
1153 struct vattr *va = ap->a_vap;
1154 struct rumpfs_node *rnd = dvp->v_data, *rn;
1155 off_t newsize;
1156 int rv;
1157
1158 newsize = va->va_type == VSOCK ? DEV_BSIZE : 0;
1159 rn = makeprivate(va->va_type, va->va_mode & ALLPERMS, NODEV,
1160 newsize, false);
1161 if ((cnp->cn_flags & ISWHITEOUT) != 0)
1162 rn->rn_va.va_flags |= UF_OPAQUE;
1163 rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
1164 if (rv) {
1165 freeprivate(rn);
1166 return rv;
1167 }
1168
1169 makedir(rnd, cnp, rn);
1170
1171 return rv;
1172 }
1173
1174 static int
1175 rump_vop_symlink(void *v)
1176 {
1177 struct vop_symlink_v3_args /* {
1178 struct vnode *a_dvp;
1179 struct vnode **a_vpp;
1180 struct componentname *a_cnp;
1181 struct vattr *a_vap;
1182 char *a_target;
1183 }; */ *ap = v;
1184 struct vnode *dvp = ap->a_dvp;
1185 struct vnode **vpp = ap->a_vpp;
1186 struct componentname *cnp = ap->a_cnp;
1187 struct vattr *va = ap->a_vap;
1188 struct rumpfs_node *rnd = dvp->v_data, *rn;
1189 const char *target = ap->a_target;
1190 size_t linklen;
1191 int rv;
1192
1193 linklen = strlen(target);
1194 KASSERT(linklen < MAXPATHLEN);
1195 rn = makeprivate(VLNK, va->va_mode & ALLPERMS, NODEV, linklen, false);
1196 if ((cnp->cn_flags & ISWHITEOUT) != 0)
1197 rn->rn_va.va_flags |= UF_OPAQUE;
1198 rv = vcache_get(dvp->v_mount, &rn, sizeof(rn), vpp);
1199 if (rv) {
1200 freeprivate(rn);
1201 return rv;
1202 }
1203
1204 makedir(rnd, cnp, rn);
1205
1206 KASSERT(linklen < MAXPATHLEN);
1207 rn->rn_linktarg = PNBUF_GET();
1208 rn->rn_linklen = linklen;
1209 strcpy(rn->rn_linktarg, target);
1210
1211 return rv;
1212 }
1213
1214 static int
1215 rump_vop_readlink(void *v)
1216 {
1217 struct vop_readlink_args /* {
1218 struct vnode *a_vp;
1219 struct uio *a_uio;
1220 kauth_cred_t a_cred;
1221 }; */ *ap = v;
1222 struct vnode *vp = ap->a_vp;
1223 struct rumpfs_node *rn = vp->v_data;
1224 struct uio *uio = ap->a_uio;
1225
1226 return uiomove(rn->rn_linktarg, rn->rn_linklen, uio);
1227 }
1228
1229 static int
1230 rump_vop_whiteout(void *v)
1231 {
1232 struct vop_whiteout_args /* {
1233 struct vnode *a_dvp;
1234 struct componentname *a_cnp;
1235 int a_flags;
1236 } */ *ap = v;
1237 struct vnode *dvp = ap->a_dvp;
1238 struct rumpfs_node *rnd = dvp->v_data;
1239 struct componentname *cnp = ap->a_cnp;
1240 int flags = ap->a_flags;
1241
1242 switch (flags) {
1243 case LOOKUP:
1244 break;
1245 case CREATE:
1246 makedir(rnd, cnp, RUMPFS_WHITEOUT);
1247 break;
1248 case DELETE:
1249 cnp->cn_flags &= ~DOWHITEOUT; /* cargo culting never fails ? */
1250 freedir(rnd, cnp);
1251 break;
1252 default:
1253 panic("unknown whiteout op %d", flags);
1254 }
1255
1256 return 0;
1257 }
1258
1259 static int
1260 rump_vop_open(void *v)
1261 {
1262 struct vop_open_args /* {
1263 struct vnode *a_vp;
1264 int a_mode;
1265 kauth_cred_t a_cred;
1266 } */ *ap = v;
1267 struct vnode *vp = ap->a_vp;
1268 struct rumpfs_node *rn = vp->v_data;
1269 int mode = ap->a_mode;
1270 int error = EINVAL;
1271
1272 if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0)
1273 return 0;
1274
1275 if (mode & FREAD) {
1276 if (rn->rn_readfd != -1)
1277 return 0;
1278 error = rumpuser_open(rn->rn_hostpath,
1279 RUMPUSER_OPEN_RDONLY, &rn->rn_readfd);
1280 }
1281
1282 if (mode & FWRITE) {
1283 if (rn->rn_writefd != -1)
1284 return 0;
1285 error = rumpuser_open(rn->rn_hostpath,
1286 RUMPUSER_OPEN_WRONLY, &rn->rn_writefd);
1287 }
1288
1289 return error;
1290 }
1291
1292 /* simple readdir. even omits dotstuff and periods */
1293 static int
1294 rump_vop_readdir(void *v)
1295 {
1296 struct vop_readdir_args /* {
1297 struct vnode *a_vp;
1298 struct uio *a_uio;
1299 kauth_cred_t a_cred;
1300 int *a_eofflag;
1301 off_t **a_cookies;
1302 int *a_ncookies;
1303 } */ *ap = v;
1304 struct vnode *vp = ap->a_vp;
1305 struct uio *uio = ap->a_uio;
1306 struct rumpfs_node *rnd = vp->v_data;
1307 struct rumpfs_dent *rdent;
1308 struct dirent *dentp = NULL;
1309 unsigned i;
1310 int rv = 0;
1311
1312 /* seek to current entry */
1313 for (i = 0, rdent = LIST_FIRST(&rnd->rn_dir);
1314 (i < uio->uio_offset) && rdent;
1315 i++, rdent = LIST_NEXT(rdent, rd_entries))
1316 continue;
1317 if (!rdent)
1318 goto out;
1319
1320 /* copy entries */
1321 dentp = kmem_alloc(sizeof(*dentp), KM_SLEEP);
1322 for (; rdent && uio->uio_resid > 0;
1323 rdent = LIST_NEXT(rdent, rd_entries), i++) {
1324 strlcpy(dentp->d_name, rdent->rd_name, sizeof(dentp->d_name));
1325 dentp->d_namlen = strlen(dentp->d_name);
1326 dentp->d_reclen = _DIRENT_RECLEN(dentp, dentp->d_namlen);
1327
1328 if (__predict_false(RDENT_ISWHITEOUT(rdent))) {
1329 dentp->d_fileno = INO_WHITEOUT;
1330 dentp->d_type = DT_WHT;
1331 } else {
1332 dentp->d_fileno = rdent->rd_node->rn_va.va_fileid;
1333 dentp->d_type = vtype2dt(rdent->rd_node->rn_va.va_type);
1334 }
1335
1336 if (uio->uio_resid < dentp->d_reclen) {
1337 i--;
1338 break;
1339 }
1340
1341 rv = uiomove(dentp, dentp->d_reclen, uio);
1342 if (rv) {
1343 i--;
1344 break;
1345 }
1346 }
1347 kmem_free(dentp, sizeof(*dentp));
1348 dentp = NULL;
1349
1350 out:
1351 KASSERT(dentp == NULL);
1352 if (ap->a_cookies) {
1353 *ap->a_ncookies = 0;
1354 *ap->a_cookies = NULL;
1355 }
1356 if (rdent)
1357 *ap->a_eofflag = 0;
1358 else
1359 *ap->a_eofflag = 1;
1360 uio->uio_offset = i;
1361
1362 return rv;
1363 }
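
/*
 * Note: the uio_offset maintained above is a plain entry index into
 * rn_dir rather than a byte offset, and no cookies are generated, so
 * seeking in a directory is only stable as long as its entry list is
 * not modified between readdir calls.
 */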
1364
1365 static int
1366 etread(struct rumpfs_node *rn, struct uio *uio)
1367 {
1368 struct rumpuser_iovec iov;
1369 uint8_t *buf;
1370 size_t bufsize, n;
1371 int error = 0;
1372
1373 bufsize = uio->uio_resid;
1374 if (bufsize == 0)
1375 return 0;
1376 buf = kmem_alloc(bufsize, KM_SLEEP);
1377
1378 iov.iov_base = buf;
1379 iov.iov_len = bufsize;
1380 if ((error = rumpuser_iovread(rn->rn_readfd, &iov, 1,
1381 uio->uio_offset + rn->rn_offset, &n)) == 0) {
1382 KASSERT(n <= bufsize);
1383 error = uiomove(buf, n, uio);
1384 }
1385
1386 kmem_free(buf, bufsize);
1387 return error;
1388 }
1389
1390 static int
1391 rump_vop_read(void *v)
1392 {
1393 struct vop_read_args /* {
1394 struct vnode *a_vp;
1395 struct uio *a_uio;
   1396 		int a_ioflag;
1397 kauth_cred_t a_cred;
1398 }; */ *ap = v;
1399 struct vnode *vp = ap->a_vp;
1400 struct rumpfs_node *rn = vp->v_data;
1401 struct uio *uio = ap->a_uio;
1402 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1403 off_t chunk;
1404 int error = 0;
1405 struct timespec ts;
1406
1407 if (vp->v_type == VDIR)
1408 return EISDIR;
1409
1410 /* et op? */
1411 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1412 return etread(rn, uio);
1413
1414 getnanotime(&ts);
1415 (void)rumpfs_update(RUMPFS_ACCESS, vp, &ts, &ts, &ts);
1416
1417 /* otherwise, it's off to ubc with us */
1418 while (uio->uio_resid > 0) {
1419 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1420 if (chunk == 0)
1421 break;
1422 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1423 UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
1424 if (error)
1425 break;
1426 }
1427
1428 return error;
1429 }
1430
1431 static int
1432 etwrite(struct rumpfs_node *rn, struct uio *uio)
1433 {
1434 struct rumpuser_iovec iov;
1435 uint8_t *buf;
1436 size_t bufsize, n;
1437 int error = 0;
1438
1439 bufsize = uio->uio_resid;
1440 if (bufsize == 0)
1441 return 0;
1442 buf = kmem_alloc(bufsize, KM_SLEEP);
1443 error = uiomove(buf, bufsize, uio);
1444 if (error)
1445 goto out;
1446
1447 KASSERT(uio->uio_resid == 0);
1448 iov.iov_base = buf;
1449 iov.iov_len = bufsize;
1450 if ((error = rumpuser_iovwrite(rn->rn_writefd, &iov, 1,
1451 (uio->uio_offset-bufsize) + rn->rn_offset, &n)) == 0) {
1452 KASSERT(n <= bufsize);
1453 uio->uio_resid = bufsize - n;
1454 }
1455
1456 out:
1457 kmem_free(buf, bufsize);
1458 return error;
1459 }
1460
1461 static int
1462 rump_vop_write(void *v)
1463 {
1464 struct vop_write_args /* {
1465 struct vnode *a_vp;
1466 struct uio *a_uio;
   1467 		int a_ioflag;
1468 kauth_cred_t a_cred;
1469 }; */ *ap = v;
1470 struct vnode *vp = ap->a_vp;
1471 struct rumpfs_node *rn = vp->v_data;
1472 struct uio *uio = ap->a_uio;
1473 const int advice = IO_ADV_DECODE(ap->a_ioflag);
1474 void *olddata;
1475 size_t oldlen, newlen;
1476 off_t chunk;
1477 int error = 0;
1478 bool allocd = false;
1479 struct timespec ts;
1480
1481 getnanotime(&ts);
1482 (void)rumpfs_update(RUMPFS_MODIFY, vp, &ts, &ts, &ts);
1483
1484 if (ap->a_ioflag & IO_APPEND)
1485 uio->uio_offset = vp->v_size;
1486
1487 /* consult et? */
1488 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST)
1489 return etwrite(rn, uio);
1490
1491 /*
1492 * Otherwise, it's a case of ubcmove.
1493 */
1494
1495 /*
1496 * First, make sure we have enough storage.
1497 *
1498 * No, you don't need to tell me it's not very efficient.
1499 * No, it doesn't really support sparse files, just fakes it.
1500 */
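	/*
	 * E.g. writing a single byte at offset 1GB will allocate and zero
	 * roughly 1GB of backing memory in the block below.
	 */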
1501 newlen = uio->uio_offset + uio->uio_resid;
1502 oldlen = 0; /* XXXgcc */
1503 olddata = NULL;
1504 if (rn->rn_dlen < newlen) {
1505 oldlen = rn->rn_dlen;
1506 olddata = rn->rn_data;
1507
1508 rn->rn_data = rump_hypermalloc(newlen, 0, false, "rumpfs");
1509 if (rn->rn_data == NULL)
1510 return ENOSPC;
1511 rn->rn_dlen = newlen;
1512 memset(rn->rn_data, 0, newlen);
1513 if (oldlen > 0)
1514 memcpy(rn->rn_data, olddata, oldlen);
1515 allocd = true;
1516 uvm_vnp_setsize(vp, newlen);
1517 }
1518
1519 /* ok, we have enough stooorage. write */
1520 while (uio->uio_resid > 0) {
1521 chunk = MIN(uio->uio_resid, (off_t)rn->rn_dlen-uio->uio_offset);
1522 if (chunk == 0)
1523 break;
1524 error = ubc_uiomove(&vp->v_uobj, uio, chunk, advice,
1525 UBC_WRITE | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
1526 if (error)
1527 break;
1528 }
1529
1530 if (allocd) {
1531 if (error) {
1532 rump_hyperfree(rn->rn_data, newlen);
1533 rn->rn_data = olddata;
1534 rn->rn_dlen = oldlen;
1535 uvm_vnp_setsize(vp, oldlen);
1536 } else {
1537 if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
1538 rump_hyperfree(olddata, oldlen);
1539 } else {
1540 rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
1541 }
1542 }
1543 }
1544
1545 return error;
1546 }
1547
1548 static int
1549 rump_vop_bmap(void *v)
1550 {
1551 struct vop_bmap_args /* {
1552 struct vnode *a_vp;
1553 daddr_t a_bn;
1554 struct vnode **a_vpp;
1555 daddr_t *a_bnp;
1556 int *a_runp;
1557 } */ *ap = v;
1558
1559 /* 1:1 mapping */
1560 if (ap->a_vpp)
1561 *ap->a_vpp = ap->a_vp;
1562 if (ap->a_bnp)
1563 *ap->a_bnp = ap->a_bn;
1564 if (ap->a_runp)
1565 *ap->a_runp = 16;
1566
1567 return 0;
1568 }
1569
1570 static int
1571 rump_vop_strategy(void *v)
1572 {
1573 struct vop_strategy_args /* {
1574 struct vnode *a_vp;
1575 struct buf *a_bp;
1576 } */ *ap = v;
1577 struct vnode *vp = ap->a_vp;
1578 struct rumpfs_node *rn = vp->v_data;
1579 struct buf *bp = ap->a_bp;
1580 off_t copylen, copyoff;
1581 int error;
1582
1583 if (vp->v_type != VREG || rn->rn_flags & RUMPNODE_ET_PHONE_HOST) {
1584 error = EINVAL;
1585 goto out;
1586 }
1587
1588 copyoff = bp->b_blkno << DEV_BSHIFT;
1589 copylen = MIN(rn->rn_dlen - copyoff, bp->b_bcount);
1590 if (BUF_ISWRITE(bp)) {
1591 memcpy((uint8_t *)rn->rn_data + copyoff, bp->b_data, copylen);
1592 } else {
1593 memset((uint8_t*)bp->b_data + copylen, 0, bp->b_bcount-copylen);
1594 memcpy(bp->b_data, (uint8_t *)rn->rn_data + copyoff, copylen);
1595 }
1596 bp->b_resid = 0;
1597 error = 0;
1598
1599 out:
1600 bp->b_error = error;
1601 biodone(bp);
1602 return 0;
1603 }
1604
1605 static int
1606 rump_vop_pathconf(void *v)
1607 {
1608 struct vop_pathconf_args /* {
1609 struct vnode *a_vp;
1610 int a_name;
1611 register_t *a_retval;
1612 }; */ *ap = v;
1613 int name = ap->a_name;
1614 register_t *retval = ap->a_retval;
1615
1616 switch (name) {
1617 case _PC_LINK_MAX:
1618 *retval = LINK_MAX;
1619 return 0;
1620 case _PC_NAME_MAX:
1621 *retval = RUMPFS_MAXNAMLEN;
1622 return 0;
1623 case _PC_PATH_MAX:
1624 *retval = PATH_MAX;
1625 return 0;
1626 case _PC_PIPE_BUF:
1627 *retval = PIPE_BUF;
1628 return 0;
1629 case _PC_CHOWN_RESTRICTED:
1630 *retval = 1;
1631 return 0;
1632 case _PC_NO_TRUNC:
1633 *retval = 1;
1634 return 0;
1635 case _PC_SYNC_IO:
1636 *retval = 1;
1637 return 0;
1638 case _PC_FILESIZEBITS:
1639 *retval = 43; /* this one goes to 11 */
1640 return 0;
1641 case _PC_SYMLINK_MAX:
1642 *retval = MAXPATHLEN;
1643 return 0;
1644 case _PC_2_SYMLINKS:
1645 *retval = 1;
1646 return 0;
1647 default:
1648 return EINVAL;
1649 }
1650 }
1651
1652 static int
1653 rump_vop_success(void *v)
1654 {
1655
1656 return 0;
1657 }
1658
1659 static int
1660 rump_vop_inactive(void *v)
1661 {
1662 struct vop_inactive_v2_args /* {
1663 struct vnode *a_vp;
1664 bool *a_recycle;
1665 } */ *ap = v;
1666 struct vnode *vp = ap->a_vp;
1667 struct rumpfs_node *rn = vp->v_data;
1668
1669 if (rn->rn_flags & RUMPNODE_ET_PHONE_HOST && vp->v_type == VREG) {
1670 if (rn->rn_readfd != -1) {
1671 rumpuser_close(rn->rn_readfd);
1672 rn->rn_readfd = -1;
1673 }
1674 if (rn->rn_writefd != -1) {
1675 rumpuser_close(rn->rn_writefd);
1676 rn->rn_writefd = -1;
1677 }
1678 }
1679 *ap->a_recycle = (rn->rn_flags & RUMPNODE_CANRECLAIM) ? true : false;
1680
1681 return 0;
1682 }
1683
1684 static int
1685 rump_vop_reclaim(void *v)
1686 {
1687 struct vop_reclaim_v2_args /* {
1688 struct vnode *a_vp;
1689 } */ *ap = v;
1690 struct vnode *vp = ap->a_vp;
1691 struct rumpfs_node *rn = vp->v_data;
1692
1693 VOP_UNLOCK(vp);
1694
1695 mutex_enter(&reclock);
1696 rn->rn_vp = NULL;
1697 mutex_exit(&reclock);
1698 genfs_node_destroy(vp);
1699 vp->v_data = NULL;
1700
1701 if (rn->rn_flags & RUMPNODE_CANRECLAIM) {
1702 if (vp->v_type == VREG
1703 && (rn->rn_flags & RUMPNODE_ET_PHONE_HOST) == 0
1704 && rn->rn_data) {
1705 if ((rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
1706 rump_hyperfree(rn->rn_data, rn->rn_dlen);
1707 } else {
1708 rn->rn_flags &= ~RUMPNODE_EXTSTORAGE;
1709 }
1710 rn->rn_data = NULL;
1711 }
1712
1713 if (vp->v_type == VLNK)
1714 PNBUF_PUT(rn->rn_linktarg);
1715 if (rn->rn_hostpath)
1716 free(rn->rn_hostpath, M_TEMP);
1717 freeprivate(rn);
1718 }
1719
1720 return 0;
1721 }
1722
1723 static int
1724 rump_vop_spec(void *v)
1725 {
1726 struct vop_generic_args *ap = v;
1727 int (**opvec)(void *);
1728
1729 switch (ap->a_desc->vdesc_offset) {
1730 case VOP_ACCESS_DESCOFFSET:
1731 case VOP_GETATTR_DESCOFFSET:
1732 case VOP_SETATTR_DESCOFFSET:
1733 case VOP_LOCK_DESCOFFSET:
1734 case VOP_UNLOCK_DESCOFFSET:
1735 case VOP_ISLOCKED_DESCOFFSET:
1736 case VOP_INACTIVE_DESCOFFSET:
1737 case VOP_RECLAIM_DESCOFFSET:
1738 opvec = rump_vnodeop_p;
1739 break;
1740 default:
1741 opvec = spec_vnodeop_p;
1742 break;
1743 }
1744
1745 return VOCALL(opvec, ap->a_desc->vdesc_offset, v);
1746 }
1747
1748 static int
1749 rump_vop_advlock(void *v)
1750 {
1751 struct vop_advlock_args /* {
1752 const struct vnodeop_desc *a_desc;
1753 struct vnode *a_vp;
1754 void *a_id;
1755 int a_op;
1756 struct flock *a_fl;
1757 int a_flags;
1758 } */ *ap = v;
1759 struct vnode *vp = ap->a_vp;
1760 struct rumpfs_node *rn = vp->v_data;
1761
1762 return lf_advlock(ap, &rn->rn_lockf, vp->v_size);
1763 }
1764
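/*
 * RUMPFS_FCNTL_EXTSTORAGE_ADD lets a local client donate an externally
 * allocated buffer as a regular file's backing storage.  Judging purely
 * from the handler below, the argument is a struct rumpfs_extstorage
 * whose rfse_data/rfse_dlen describe the buffer and whose rfse_flags
 * must be 0; the request is only honoured on local, non-etfs VREG
 * vnodes opened for writing.  A rough caller-side sketch (invocation
 * path assumed, not verified):
 *
 *	struct rumpfs_extstorage rfse = {
 *		.rfse_flags = 0,
 *		.rfse_data = buf,
 *		.rfse_dlen = buflen,
 *	};
 *	error = rump_sys_fcntl(fd, RUMPFS_FCNTL_EXTSTORAGE_ADD, &rfse);
 */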
1765 static int
1766 rump_vop_fcntl(void *v)
1767 {
1768 struct vop_fcntl_args /* {
1769 struct vnode *a_vp;
1770 u_int a_command;
1771 void *a_data;
1772 int a_fflag;
1773 kauth_cred_t a_cred;
1774 } */ *ap = v;
1775 struct proc *p = curproc;
1776 struct vnode *vp = ap->a_vp;
1777 struct rumpfs_node *rn = vp->v_data;
1778 u_int cmd = ap->a_command;
1779 int fflag = ap->a_fflag;
1780 struct rumpfs_extstorage *rfse = ap->a_data;
1781 int error = 0;
1782
1783 /* none of the current rumpfs fcntlops are defined for remotes */
1784 if (!RUMP_LOCALPROC_P(p))
1785 return EINVAL;
1786
1787 switch (cmd) {
1788 case RUMPFS_FCNTL_EXTSTORAGE_ADD:
1789 break;
1790 default:
1791 return EINVAL;
1792 }
1793
1794 if ((fflag & FWRITE) == 0)
1795 return EBADF;
1796
1797 if (vp->v_type != VREG || (rn->rn_flags & RUMPNODE_ET_PHONE_HOST))
1798 return EINVAL;
1799
1800 if (rfse->rfse_flags != 0)
1801 return EINVAL;
1802
1803 /*
1804 * Ok, we are good to go. Process.
1805 */
1806
1807 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1808
1809 KASSERT(cmd == RUMPFS_FCNTL_EXTSTORAGE_ADD);
1810 if (rn->rn_data && (rn->rn_flags & RUMPNODE_EXTSTORAGE) == 0) {
1811 rump_hyperfree(rn->rn_data, rn->rn_dlen);
1812 }
1813
1814 rn->rn_data = rfse->rfse_data;
1815 rn->rn_dlen = rfse->rfse_dlen;
1816 uvm_vnp_setsize(vp, rn->rn_dlen);
1817 rn->rn_flags |= RUMPNODE_EXTSTORAGE;
1818
1819 VOP_UNLOCK(vp);
1820
1821 return error;
1822 }
1823
1824 /*
1825 * Begin vfs-level stuff
1826 */
1827
1828 VFS_PROTOS(rumpfs);
1829 struct vfsops rumpfs_vfsops = {
1830 .vfs_name = MOUNT_RUMPFS,
1831 .vfs_min_mount_data = 0,
1832 .vfs_mount = rumpfs_mount,
1833 .vfs_start = (void *)nullop,
1834 .vfs_unmount = rumpfs_unmount,
1835 .vfs_root = rumpfs_root,
1836 .vfs_quotactl = (void *)eopnotsupp,
1837 .vfs_statvfs = genfs_statvfs,
1838 .vfs_sync = (void *)nullop,
1839 .vfs_vget = rumpfs_vget,
1840 .vfs_loadvnode = rumpfs_loadvnode,
1841 .vfs_fhtovp = (void *)eopnotsupp,
1842 .vfs_vptofh = (void *)eopnotsupp,
1843 .vfs_init = rumpfs_init,
1844 .vfs_reinit = NULL,
1845 .vfs_done = rumpfs_done,
1846 .vfs_mountroot = rumpfs_mountroot,
1847 .vfs_snapshot = (void *)eopnotsupp,
1848 .vfs_extattrctl = (void *)eopnotsupp,
1849 .vfs_suspendctl = genfs_suspendctl,
1850 .vfs_renamelock_enter = genfs_renamelock_enter,
1851 .vfs_renamelock_exit = genfs_renamelock_exit,
1852 .vfs_opv_descs = rump_opv_descs,
1853 /* vfs_refcount */
1854 /* vfs_list */
1855 };
1856
1857 static int
1858 rumpfs_mountfs(struct mount *mp)
1859 {
1860 struct rumpfs_mount *rfsmp;
1861 struct rumpfs_node *rn;
1862 int error;
1863
1864 rfsmp = kmem_alloc(sizeof(*rfsmp), KM_SLEEP);
1865
1866 rn = makeprivate(VDIR, RUMPFS_DEFAULTMODE, NODEV, DEV_BSIZE, false);
1867 rn->rn_parent = rn;
1868 if ((error = vcache_get(mp, &rn, sizeof(rn), &rfsmp->rfsmp_rvp))
1869 != 0) {
1870 freeprivate(rn);
1871 kmem_free(rfsmp, sizeof(*rfsmp));
1872 return error;
1873 }
1874
1875 rfsmp->rfsmp_rvp->v_vflag |= VV_ROOT;
1876
1877 mp->mnt_data = rfsmp;
1878 mp->mnt_stat.f_namemax = RUMPFS_MAXNAMLEN;
1879 mp->mnt_stat.f_iosize = 512;
1880 mp->mnt_flag |= MNT_LOCAL;
1881 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
1882 mp->mnt_fs_bshift = DEV_BSHIFT;
1883 vfs_getnewfsid(mp);
1884
1885 return 0;
1886 }
1887
1888 int
1889 rumpfs_mount(struct mount *mp, const char *mntpath, void *arg, size_t *alen)
1890 {
1891 int error, flags;
1892
1893 if (mp->mnt_flag & MNT_GETARGS) {
1894 return 0;
1895 }
1896 if (mp->mnt_flag & MNT_UPDATE) {
1897 if ((mp->mnt_iflag & IMNT_WANTRDONLY)) {
1898 /* Changing from read/write to read-only. */
1899 flags = WRITECLOSE;
1900 if ((mp->mnt_flag & MNT_FORCE))
1901 flags |= FORCECLOSE;
1902 error = vflush(mp, NULL, flags);
1903 if (error)
1904 return error;
1905 }
1906 return 0;
1907 }
1908
1909 error = set_statvfs_info(mntpath, UIO_USERSPACE, "rumpfs", UIO_SYSSPACE,
1910 mp->mnt_op->vfs_name, mp, curlwp);
1911 if (error)
1912 return error;
1913
1914 return rumpfs_mountfs(mp);
1915 }
1916
1917 int
1918 rumpfs_unmount(struct mount *mp, int mntflags)
1919 {
1920 struct rumpfs_mount *rfsmp = mp->mnt_data;
1921 int flags = 0, error;
1922
1923 if (panicstr || mntflags & MNT_FORCE)
1924 flags |= FORCECLOSE;
1925
1926 if (vrefcnt(rfsmp->rfsmp_rvp) > 1 && (flags & FORCECLOSE) == 0)
1927 return EBUSY;
1928
1929 if ((error = vflush(mp, rfsmp->rfsmp_rvp, flags)) != 0)
1930 return error;
1931 vgone(rfsmp->rfsmp_rvp);
1932
1933 kmem_free(rfsmp, sizeof(*rfsmp));
1934
1935 return 0;
1936 }
1937
1938 int
1939 rumpfs_root(struct mount *mp, int lktype, struct vnode **vpp)
1940 {
1941 struct rumpfs_mount *rfsmp = mp->mnt_data;
1942
1943 vref(rfsmp->rfsmp_rvp);
1944 vn_lock(rfsmp->rfsmp_rvp, lktype | LK_RETRY);
1945 *vpp = rfsmp->rfsmp_rvp;
1946 return 0;
1947 }
1948
1949 int
1950 rumpfs_vget(struct mount *mp, ino_t ino, int lktype, struct vnode **vpp)
1951 {
1952
1953 return EOPNOTSUPP;
1954 }
1955
1956 int
1957 rumpfs_loadvnode(struct mount *mp, struct vnode *vp,
1958 const void *key, size_t key_len, const void **new_key)
1959 {
1960 struct rumpfs_node *rn;
1961 struct vattr *va;
1962
1963 KASSERT(!mutex_owned(&reclock));
1964
1965 KASSERT(key_len == sizeof(rn));
1966 memcpy(&rn, key, key_len);
1967
1968 va = &rn->rn_va;
1969
1970 vp->v_tag = VT_RUMP;
1971 vp->v_type = va->va_type;
1972 switch (vp->v_type) {
1973 case VCHR:
1974 case VBLK:
1975 vp->v_op = rump_specop_p;
1976 spec_node_init(vp, va->va_rdev);
1977 break;
1978 default:
1979 vp->v_op = rump_vnodeop_p;
1980 break;
1981 }
1982 vp->v_size = vp->v_writesize = va->va_size;
1983 vp->v_data = rn;
1984
1985 genfs_node_init(vp, &rumpfs_genfsops);
1986 mutex_enter(&reclock);
1987 rn->rn_vp = vp;
1988 mutex_exit(&reclock);
1989
1990 *new_key = &vp->v_data;
1991
1992 return 0;
1993 }
1994
1995 void
1996 rumpfs_init()
1997 {
1998 extern rump_etfs_register_withsize_fn rump__etfs_register;
1999 extern rump_etfs_remove_fn rump__etfs_remove;
2000 extern struct rump_boot_etfs *ebstart;
2001 struct rump_boot_etfs *eb;
2002
2003 CTASSERT(RUMP_ETFS_SIZE_ENDOFF == RUMPBLK_SIZENOTSET);
2004
2005 mutex_init(&reclock, MUTEX_DEFAULT, IPL_NONE);
2006 mutex_init(&etfs_lock, MUTEX_DEFAULT, IPL_NONE);
2007
2008 rump__etfs_register = etfsregister;
2009 rump__etfs_remove = etfsremove;
2010
2011 for (eb = ebstart; eb; eb = eb->_eb_next) {
2012 eb->eb_status = etfsregister(eb->eb_key, eb->eb_hostpath,
2013 eb->eb_type, eb->eb_begin, eb->eb_size);
2014 }
2015 }
2016
2017 void
2018 rumpfs_done()
2019 {
2020
2021 mutex_destroy(&reclock);
2022 mutex_destroy(&etfs_lock);
2023 }
2024
2025 int
2026 rumpfs_mountroot()
2027 {
2028 struct mount *mp;
2029 int error;
2030
2031 if ((error = vfs_rootmountalloc(MOUNT_RUMPFS, "rootdev", &mp)) != 0) {
2032 vrele(rootvp);
2033 return error;
2034 }
2035
2036 if ((error = rumpfs_mountfs(mp)) != 0)
2037 panic("mounting rootfs failed: %d", error);
2038
2039 mountlist_append(mp);
2040
2041 error = set_statvfs_info("/", UIO_SYSSPACE, "rumpfs", UIO_SYSSPACE,
2042 mp->mnt_op->vfs_name, mp, curlwp);
2043 if (error)
2044 panic("set_statvfs_info failed for rootfs: %d", error);
2045
2046 mp->mnt_flag &= ~MNT_RDONLY;
2047 vfs_unbusy(mp);
2048
2049 return 0;
2050 }
2051