/*	$NetBSD: tmpfs_vnops.c,v 1.145.4.1 2021/08/01 22:42:37 thorpej Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs vnode interface.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vnops.c,v 1.145.4.1 2021/08/01 22:42:37 thorpej Exp $");

#include <sys/param.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/event.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/kauth.h>
#include <sys/atomic.h>

#include <uvm/uvm_object.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs_vnops.h>
#include <fs/tmpfs/tmpfs.h>

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
int (**tmpfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc tmpfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_parsepath_desc, genfs_parsepath },
	{ &vop_lookup_desc, tmpfs_lookup },
	{ &vop_create_desc, tmpfs_create },
	{ &vop_mknod_desc, tmpfs_mknod },
	{ &vop_open_desc, tmpfs_open },
	{ &vop_close_desc, tmpfs_close },
	{ &vop_access_desc, tmpfs_access },
	{ &vop_accessx_desc, genfs_accessx },
	{ &vop_getattr_desc, tmpfs_getattr },
	{ &vop_setattr_desc, tmpfs_setattr },
	{ &vop_read_desc, tmpfs_read },
	{ &vop_write_desc, tmpfs_write },
	{ &vop_fallocate_desc, genfs_eopnotsupp },
	{ &vop_fdiscard_desc, genfs_eopnotsupp },
	{ &vop_ioctl_desc, genfs_enoioctl },
	{ &vop_fcntl_desc, genfs_fcntl },
	{ &vop_poll_desc, genfs_poll },
	{ &vop_kqfilter_desc, genfs_kqfilter },
	{ &vop_revoke_desc, genfs_revoke },
	{ &vop_mmap_desc, genfs_mmap },
	{ &vop_fsync_desc, tmpfs_fsync },
	{ &vop_seek_desc, genfs_seek },
	{ &vop_remove_desc, tmpfs_remove },
	{ &vop_link_desc, tmpfs_link },
	{ &vop_rename_desc, tmpfs_rename },
	{ &vop_mkdir_desc, tmpfs_mkdir },
	{ &vop_rmdir_desc, tmpfs_rmdir },
	{ &vop_symlink_desc, tmpfs_symlink },
	{ &vop_readdir_desc, tmpfs_readdir },
	{ &vop_readlink_desc, tmpfs_readlink },
	{ &vop_abortop_desc, genfs_abortop },
	{ &vop_inactive_desc, tmpfs_inactive },
	{ &vop_reclaim_desc, tmpfs_reclaim },
	{ &vop_lock_desc, genfs_lock },
	{ &vop_unlock_desc, genfs_unlock },
	{ &vop_bmap_desc, genfs_eopnotsupp },
	{ &vop_strategy_desc, genfs_eopnotsupp },
	{ &vop_print_desc, tmpfs_print },
	{ &vop_pathconf_desc, tmpfs_pathconf },
	{ &vop_islocked_desc, genfs_islocked },
	{ &vop_advlock_desc, tmpfs_advlock },
	{ &vop_bwrite_desc, genfs_nullop },
	{ &vop_getpages_desc, tmpfs_getpages },
	{ &vop_putpages_desc, tmpfs_putpages },
	{ &vop_whiteout_desc, tmpfs_whiteout },
	{ NULL, NULL }
};

const struct vnodeopv_desc tmpfs_vnodeop_opv_desc = {
	&tmpfs_vnodeop_p, tmpfs_vnodeop_entries
};
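
/*
 * Illustrative sketch, not part of the build: a VOP_READ() call on a
 * tmpfs vnode is dispatched through the vector above, roughly as the
 * auto-generated wrapper in vnode_if.c does it (vp->v_op is
 * tmpfs_vnodeop_p, so the vop_read slot resolves to tmpfs_read; vp,
 * uio, ioflag and cred are placeholders):
 *
 *	struct vop_read_args a;
 *
 *	a.a_desc = VDESC(vop_read);
 *	a.a_vp = vp;
 *	a.a_uio = uio;
 *	a.a_ioflag = ioflag;
 *	a.a_cred = cred;
 *	error = VCALL(vp, VOFFSET(vop_read), &a);
 */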

/*
 * tmpfs_lookup: path name traversal routine.
 *
 * Arguments: dvp (directory being searched), vpp (result),
 * cnp (component name - path).
 *
 * => Caller holds a reference and lock on dvp.
 * => We return the looked-up vnode (*vpp) with a reference held; per
 *    the v2 lookup protocol it comes back unlocked (except in the "."
 *    case, where *vpp is dvp itself and thus still locked).
 */
int
tmpfs_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	const bool lastcn = (cnp->cn_flags & ISLASTCN) != 0;
	tmpfs_node_t *dnode, *tnode;
	tmpfs_dirent_t *de;
	int cachefound, iswhiteout;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));

	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* Check accessibility of directory. */
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (error) {
		goto out;
	}

	/*
	 * If requesting the last path component on a read-only file system
	 * with a write operation, deny it.
	 */
	if (lastcn && (dvp->v_mount->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto out;
	}

	/*
	 * Avoid doing a linear scan of the directory if the requested
	 * directory and name pair is already in the cache.
	 */
	cachefound = cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_nameiop, cnp->cn_flags, &iswhiteout, vpp);
	if (iswhiteout) {
		cnp->cn_flags |= ISWHITEOUT;
	}
	if (cachefound && *vpp == NULLVP) {
		/* Negative cache hit. */
		error = ENOENT;
		goto out;
	} else if (cachefound) {
		error = 0;
		goto out;
	}

	/*
	 * Treat an unlinked directory as empty (no "." or "..")
	 */
	if (dnode->tn_links == 0) {
		KASSERT(dnode->tn_size == 0);
		error = ENOENT;
		goto out;
	}

	if (cnp->cn_flags & ISDOTDOT) {
		tmpfs_node_t *pnode;

		/*
		 * Lookup of ".." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EINVAL;
			goto out;
		}
		KASSERT(dnode->tn_type == VDIR);
		pnode = dnode->tn_spec.tn_dir.tn_parent;
		if (pnode == NULL) {
			error = ENOENT;
			goto done;
		}

		error = vcache_get(dvp->v_mount, &pnode, sizeof(pnode), vpp);
		goto done;
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		/*
		 * Lookup of "." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EISDIR;
			goto out;
		}
		vref(dvp);
		*vpp = dvp;
		error = 0;
		goto done;
	}

	/*
	 * Other lookup cases: perform directory scan.
	 */
	de = tmpfs_dir_lookup(dnode, cnp);
	if (de == NULL || de->td_node == TMPFS_NODE_WHITEOUT) {
		/*
		 * The entry was not found in the directory. This is valid
		 * if we are creating or renaming an entry and are working
		 * on the last component of the path name.
		 */
		if (lastcn && (cnp->cn_nameiop == CREATE ||
		    cnp->cn_nameiop == RENAME)) {
			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
			if (error) {
				goto out;
			}
			error = EJUSTRETURN;
		} else {
			error = ENOENT;
		}
		if (de) {
			KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
			cnp->cn_flags |= ISWHITEOUT;
		}
		goto done;
	}

	tnode = de->td_node;

	/*
	 * If this is not the last path component and we found a
	 * non-directory, non-link entry (which may itself point to a
	 * directory), raise an error.
	 */
	if (!lastcn && tnode->tn_type != VDIR && tnode->tn_type != VLNK) {
		error = ENOTDIR;
		goto out;
	}

	/* Check the permissions. */
	if (lastcn && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (error)
			goto out;

		if ((dnode->tn_mode & S_ISTXT) != 0) {
			error = kauth_authorize_vnode(cnp->cn_cred,
			    KAUTH_VNODE_DELETE, tnode->tn_vnode,
			    dnode->tn_vnode, genfs_can_sticky(dvp, cnp->cn_cred,
			    dnode->tn_uid, tnode->tn_uid));
			if (error) {
				error = EPERM;
				goto out;
			}
		}
	}

	/* Get a vnode for the matching entry. */
	error = vcache_get(dvp->v_mount, &tnode, sizeof(tnode), vpp);
done:
	/*
	 * Cache the result, unless the request was for creation (where
	 * caching would not improve performance).
	 */
	if (cnp->cn_nameiop != CREATE) {
		cache_enter(dvp, *vpp, cnp->cn_nameptr, cnp->cn_namelen,
		    cnp->cn_flags);
	}
out:
	KASSERT(VOP_ISLOCKED(dvp));

	return error;
}
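
/*
 * Illustrative sketch, not part of the build: how a caller interprets
 * the result of the lookup above when resolving the last component of
 * a create (dvp locked; vp and cnp are placeholders):
 *
 *	error = VOP_LOOKUP(dvp, &vp, cnp);
 *	if (error == 0)			name exists; vp is referenced
 *	else if (error == EJUSTRETURN)	name absent, creation may proceed
 *	else if (error == ENOENT)	name absent (or negative cache hit)
 */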

int
tmpfs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
	return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
}

int
tmpfs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;
	enum vtype vt = vap->va_type;

	if (vt != VBLK && vt != VCHR && vt != VFIFO) {
		*vpp = NULL;
		return EINVAL;
	}
	return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
}

int
tmpfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	tmpfs_node_t *node;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) != 0 &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		return EPERM;
	}
	return 0;
}
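
/*
 * Illustrative sketch, not part of the build: the append-only check
 * above as seen from userland on a file with the APPEND flag set
 * (e.g. via chflags(2)); "path" is a placeholder:
 *
 *	open(path, O_WRONLY)			fails with EPERM
 *	open(path, O_WRONLY | O_APPEND)		succeeds
 *	open(path, O_RDONLY)			succeeds (no FWRITE)
 */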

int
tmpfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp __diagused = ap->a_vp;

	KASSERT(VOP_ISLOCKED(vp));
	return 0;
}

int
tmpfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;
	kauth_cred_t cred = ap->a_cred;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	const bool writing = (accmode & VWRITE) != 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Is this operation possible on this vnode type at all? */
	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		if (writing && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
			return EROFS;
		}
		break;
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		break;
	default:
		return EINVAL;
	}
	if (writing && (node->tn_flags & IMMUTABLE) != 0) {
		return EPERM;
	}

	return kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(accmode,
	    vp->v_type, node->tn_mode), vp, NULL, genfs_can_access(vp, cred,
	    node->tn_uid, node->tn_gid, node->tn_mode, NULL, accmode));
}

int
tmpfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	vattr_null(vap);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_gen = TMPFS_NODE_GEN(node);
	vap->va_flags = node->tn_flags;
	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
	    node->tn_spec.tn_dev.tn_rdev : VNOVAL;
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = VNOVAL;
	vap->va_vaflags = 0;
	vap->va_spare = VNOVAL; /* XXX */

	mutex_enter(&node->tn_timelock);
	tmpfs_update_locked(vp, 0);
	vap->va_atime = node->tn_atime;
	vap->va_mtime = node->tn_mtime;
	vap->va_ctime = node->tn_ctime;
	vap->va_birthtime = node->tn_birthtime;
	mutex_exit(&node->tn_timelock);

	return 0;
}

int
tmpfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	kauth_cred_t cred = ap->a_cred;
	lwp_t *l = curlwp;
	int error = 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON || vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL || vap->va_ctime.tv_sec != VNOVAL ||
	    vap->va_gen != VNOVAL || vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL) {
		return EINVAL;
	}

	if (error == 0 && vap->va_flags != VNOVAL)
		error = tmpfs_chflags(vp, vap->va_flags, cred, l);

	if (error == 0 && vap->va_size != VNOVAL)
		error = tmpfs_chsize(vp, vap->va_size, cred, l);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, l);

	if (error == 0 && vap->va_mode != VNOVAL)
		error = tmpfs_chmod(vp, vap->va_mode, cred, l);

	const bool chsometime =
	    vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_birthtime.tv_sec != VNOVAL;
	if (error == 0 && chsometime) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
		    &vap->va_birthtime, vap->va_vaflags, cred, l);
	}
	return error;
}
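
/*
 * Illustrative sketch, not part of the build: callers flag attributes
 * they do not want changed with VNOVAL, which is what the checks above
 * rely on. E.g. truncating a locked vnode to zero length sets only
 * va_size (vp and cred are placeholders):
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred);
 */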

int
tmpfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	if (vp->v_type == VDIR) {
		return EISDIR;
	}
	if (uio->uio_offset < 0 || vp->v_type != VREG) {
		return EINVAL;
	}

	/* Note: reading zero bytes should not update atime. */
	if (uio->uio_resid == 0) {
		return 0;
	}

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;

	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		if (node->tn_size <= uio->uio_offset) {
			break;
		}
		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
	}

	tmpfs_update(vp, TMPFS_UPDATE_ATIME);
	return error;
}
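
/*
 * Worked example (not part of the build) of the length clamp in the
 * read loop above: with tn_size = 10000, uio_offset = 8192 and
 * uio_resid = 4096,
 *
 *	len = MIN(10000 - 8192, 4096) = 1808
 *
 * so ubc_uiomove() copies only the 1808 bytes that exist; the next
 * iteration sees uio_offset == tn_size and breaks, yielding the usual
 * short read at EOF.
 */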

int
tmpfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	off_t oldsize;
	int error, ubc_flags;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	if ((vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		error = EROFS;
		goto out;
	}

	if (uio->uio_offset < 0 || vp->v_type != VREG) {
		error = EINVAL;
		goto out;
	}
	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}
	if (ioflag & IO_APPEND) {
		uio->uio_offset = node->tn_size;
	}

	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid);
		if (error)
			goto out;
	}

	/*
	 * If we're extending the file and have data to write that would
	 * not leave an un-zeroed hole, we can avoid fault processing and
	 * zeroing of pages on allocation.
	 *
	 * Don't do this if the file is mapped and we need to touch an
	 * existing page, because writing a mapping of the file into itself
	 * could cause a deadlock on PG_BUSY.
	 *
	 * New pages will not become visible until finished here (because
	 * of PG_BUSY and the vnode lock).
	 */
	ubc_flags = UBC_WRITE | UBC_VNODE_FLAGS(vp);
#if 0
	/*
	 * XXX disable use of UBC_FAULTBUSY for now, this check is insufficient
	 * because it does not zero uninitialized parts of pages in all of
	 * the cases where zeroing is needed.
	 */
	if (uio->uio_offset >= oldsize &&
	    ((uio->uio_offset & (PAGE_SIZE - 1)) == 0 ||
	    ((vp->v_vflag & VV_MAPPED) == 0 &&
	    trunc_page(uio->uio_offset) == trunc_page(oldsize)))) {
		ubc_flags |= UBC_FAULTBUSY;
	}
#endif

	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;
	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    ubc_flags);
	}
	if (error) {
		(void)tmpfs_reg_resize(vp, oldsize);
	}

	tmpfs_update(vp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
	VN_KNOTE(vp, NOTE_WRITE);
out:
	if (error) {
		KASSERT(oldsize == node->tn_size);
	} else {
		KASSERT(uio->uio_resid == 0);
	}
	return error;
}
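
/*
 * Illustrative sketch, not part of the build: the write path above is
 * a grow-copy-rollback sequence, which is what makes the assertions at
 * "out:" hold in both directions:
 *
 *	tmpfs_reg_resize(vp, offset + resid)	grow first (may ENOSPC)
 *	ubc_uiomove(...)			copy in the user data
 *	tmpfs_reg_resize(vp, oldsize)		on error, undo the grow
 *
 * On success uio_resid has been consumed to zero; on failure tn_size
 * is back at oldsize, so a partially-written extension never survives.
 */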

int
tmpfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	vnode_t *vp __diagused = ap->a_vp;

	/* Nothing to do. Should be up to date. */
	KASSERT(VOP_ISLOCKED(vp));
	return 0;
}

/*
 * tmpfs_remove: unlink a file.
 *
 * => Both directory (dvp) and file (vp) are locked on entry.
 * => We unlock and drop the reference on vp; per the v2 protocol, dvp
 *    is left locked and referenced for the caller to release.
 */
int
tmpfs_remove(void *v)
{
	struct vop_remove_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, *vp = ap->a_vp;
	tmpfs_node_t *dnode, *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));

	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Files marked as immutable or append-only cannot be deleted.
	 * Likewise, files residing in directories marked as append-only
	 * cannot be deleted.
	 */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}
	if (dnode->tn_flags & APPEND) {
		error = EPERM;
		goto out;
	}

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/*
	 * Remove the entry from the directory (drops the link count) and
	 * destroy it or replace it with a whiteout.
	 *
	 * Note: the inode referred to by it will not be destroyed until
	 * the vnode is reclaimed/recycled.
	 */

	tmpfs_dir_detach(dnode, de);

	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(VFS_TO_TMPFS(vp->v_mount), de);

	if (node->tn_links > 0) {
		/* We removed a hard link: update the file's ctime too. */
		tmpfs_update(vp, TMPFS_UPDATE_CTIME);
	}
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
	error = 0;
out:
	/* Drop the reference and unlock the node. */
	if (dvp == vp) {
		vrele(vp);
	} else {
		vput(vp);
	}
	return error;
}
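
/*
 * Illustrative sketch, not part of the build: the tn_links > 0 case
 * above is the hard-link case ("a" and "b" are placeholder names on
 * the same tmpfs):
 *
 *	link("a", "b");		tn_links: 1 -> 2
 *	unlink("a");		tn_links: 2 -> 1; only the entry is gone,
 *				"b" still reaches the data; ctime updated
 */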

/*
 * tmpfs_link: create a hard link.
 */
int
tmpfs_link(void *v)
{
	struct vop_link_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	tmpfs_node_t *dnode, *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(dvp != vp);
	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp->v_type != VDIR);
	KASSERT(dvp->v_mount == vp->v_mount);

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/* Check the link count against the maximum. */
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	KASSERT(node->tn_links < LINK_MAX);

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the inode. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount),
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error) {
		goto out;
	}

	/*
	 * Insert the entry into the directory.
	 * It will increase the inode link count.
	 */
	tmpfs_dir_attach(dnode, de, node);
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);

	/* Update the timestamps and trigger the event. */
	if (node->tn_vnode) {
		VN_KNOTE(node->tn_vnode, NOTE_LINK);
	}
	tmpfs_update(vp, TMPFS_UPDATE_CTIME);
	error = 0;
out:
	VOP_UNLOCK(vp);
	return error;
}

int
tmpfs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;

	KASSERT(vap->va_type == VDIR);
	return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
}

int
tmpfs_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
	tmpfs_node_t *node = VP_TO_TMPFS_DIR(vp);
	tmpfs_dirent_t *de;
	int error = 0;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));

	/*
	 * Directories with more than two entries ('.' and '..') cannot be
	 * removed. There may be whiteout entries, which we will destroy.
	 */
	if (node->tn_size > 0) {
		/*
		 * If it never had whiteout entries, the directory is
		 * certainly not empty. Otherwise, scan for any non-whiteout
		 * entry.
		 */
		if ((node->tn_gen & TMPFS_WHITEOUT_BIT) == 0) {
			error = ENOTEMPTY;
			goto out;
		}
		TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
			if (de->td_node != TMPFS_NODE_WHITEOUT) {
				error = ENOTEMPTY;
				goto out;
			}
		}
		KASSERT(error == 0);
	}

	KASSERT(node->tn_spec.tn_dir.tn_parent == dnode);

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/* Check flags to see if we are allowed to remove the directory. */
	if (dnode->tn_flags & APPEND || node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Decrement the link count for the virtual '.' entry. */
	node->tn_links--;

	/* Detach the directory entry from the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Purge the cache for parent. */
	cache_purge(dvp);

	/*
	 * Destroy the directory entry or replace it with a whiteout.
	 *
	 * Note: the inode referred to by it will not be destroyed until
	 * the vnode is reclaimed.
	 */
	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(tmp, de);

	/* Destroy the whiteout entries from the node. */
	while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
		KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
		tmpfs_dir_detach(node, de);
		tmpfs_free_dirent(tmp, de);
	}
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);

	KASSERT(node->tn_size == 0);
	KASSERT(node->tn_links == 0);
out:
	/* Release the node. */
	KASSERT(dvp != vp);
	vput(vp);
	return error;
}

int
tmpfs_symlink(void *v)
{
	struct vop_symlink_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;
	char *target = ap->a_target;

	KASSERT(vap->va_type == VLNK);
	return tmpfs_construct_node(dvp, vpp, vap, cnp, target);
}

int
tmpfs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
	off_t startoff, cnt;
	tmpfs_node_t *node;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;
	cnt = 0;

	/*
	 * Retrieve the directory entries, unless it is being destroyed.
	 */
	if (node->tn_links) {
		error = tmpfs_dir_getdents(node, uio, &cnt);
	} else {
		error = 0;
	}

	if (eofflag != NULL) {
		*eofflag = !error && uio->uio_offset == TMPFS_DIRSEQ_EOF;
	}
	if (error || cookies == NULL || ncookies == NULL) {
		return error;
	}

	/* Update NFS-related variables, if any. */
	tmpfs_dirent_t *de = NULL;
	off_t i, off = startoff;

	*cookies = malloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
	*ncookies = cnt;

	for (i = 0; i < cnt; i++) {
		KASSERT(off != TMPFS_DIRSEQ_EOF);
		if (off != TMPFS_DIRSEQ_DOT) {
			if (off == TMPFS_DIRSEQ_DOTDOT) {
				de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
			} else if (de != NULL) {
				de = TAILQ_NEXT(de, td_entries);
			} else {
				de = tmpfs_dir_lookupbyseq(node, off);
				KASSERT(de != NULL);
				de = TAILQ_NEXT(de, td_entries);
			}
			if (de == NULL) {
				off = TMPFS_DIRSEQ_EOF;
			} else {
				off = tmpfs_dir_getseq(node, de);
			}
		} else {
			off = TMPFS_DIRSEQ_DOTDOT;
		}
		(*cookies)[i] = off;
	}
	KASSERT(uio->uio_offset == off);
	return error;
}
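
/*
 * Illustrative sketch, not part of the build: each cookie produced by
 * the loop above is the directory sequence number to resume from after
 * the corresponding entry. For a directory containing one real entry
 * "foo" read in a single pass:
 *
 *	entry	cookie stored
 *	"."	TMPFS_DIRSEQ_DOTDOT
 *	".."	tmpfs_dir_getseq(node, de) of "foo"
 *	"foo"	TMPFS_DIRSEQ_EOF
 */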

int
tmpfs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	int error;

	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(uio->uio_offset == 0);
	KASSERT(vp->v_type == VLNK);

	/* Note: readlink(2) returns the path without NUL terminator. */
	if (node->tn_size > 0) {
		error = uiomove(node->tn_spec.tn_lnk.tn_link,
		    MIN(node->tn_size, uio->uio_resid), uio);
	} else {
		error = 0;
	}
	tmpfs_update(vp, TMPFS_UPDATE_ATIME);

	return error;
}

int
tmpfs_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node;
	int error = 0;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	if (node->tn_links == 0) {
		/*
		 * Mark node as dead by setting its generation to zero.
		 */
		atomic_and_32(&node->tn_gen, ~TMPFS_NODE_GEN_MASK);

		/*
		 * If the file has been deleted, truncate it; otherwise VFS
		 * will quite rightly try to write back dirty data, which in
		 * the case of tmpfs/UAO means needless page deactivations.
		 */
		if (vp->v_type == VREG) {
			error = tmpfs_reg_resize(vp, 0);
		}
		*ap->a_recycle = true;
	} else {
		tmpfs_update(vp, 0);
		*ap->a_recycle = false;
	}

	return error;
}

int
tmpfs_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	/* Unlock vnode. We still have exclusive access to it. */
	VOP_UNLOCK(vp);

	/* Disassociate inode from vnode. */
	node->tn_vnode = NULL;
	vp->v_data = NULL;

	/* If inode is not referenced, i.e. no links, then destroy it. */
	if (node->tn_links == 0)
		tmpfs_free_node(tmp, node);
	return 0;
}

int
tmpfs_pathconf(void *v)
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;
	register_t *retval = ap->a_retval;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		return 0;
	case _PC_NAME_MAX:
		*retval = TMPFS_MAXNAMLEN;
		return 0;
	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		return 0;
	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		return 0;
	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		return 0;
	case _PC_NO_TRUNC:
		*retval = 1;
		return 0;
	case _PC_SYNC_IO:
		*retval = 1;
		return 0;
	case _PC_FILESIZEBITS:
		*retval = sizeof(off_t) * CHAR_BIT;
		return 0;
	default:
		return genfs_pathconf(ap);
	}
}
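
/*
 * Illustrative sketch, not part of the build (userland view): the
 * values above surface through pathconf(2); e.g. for a hypothetical
 * file on a tmpfs mount,
 *
 *	long n = pathconf("/tmp/f", _PC_NAME_MAX);
 *
 * yields TMPFS_MAXNAMLEN.
 */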

int
tmpfs_advlock(void *v)
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		void *a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	return lf_advlock(v, &node->tn_lockf, node->tn_size);
}

int
tmpfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;
	vnode_t *vp = ap->a_vp;
	const voff_t offset = ap->a_offset;
	struct vm_page **pgs = ap->a_m;
	const int centeridx = ap->a_centeridx;
	const vm_prot_t access_type = ap->a_access_type;
	const int advice = ap->a_advice;
	const int flags = ap->a_flags;
	int error, iflag, npages = *ap->a_count;
	tmpfs_node_t *node;
	struct uvm_object *uobj;

	KASSERT(vp->v_type == VREG);
	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));

	/*
	 * Currently, PGO_PASTEOF is not supported.
	 */
	if (vp->v_size <= offset + (centeridx << PAGE_SHIFT)) {
		if ((flags & PGO_LOCKED) == 0)
			rw_exit(vp->v_uobj.vmobjlock);
		return EINVAL;
	}

	if (vp->v_size < offset + (npages << PAGE_SHIFT)) {
		npages = (round_page(vp->v_size) - offset) >> PAGE_SHIFT;
	}

	/*
	 * Check for reclaimed vnode. v_interlock is not held here, but
	 * VI_DEADCHECK is set with vmobjlock held.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	if (__predict_false((iflag & VI_DEADCHECK) != 0)) {
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, VDEAD_NOWAIT);
		mutex_exit(vp->v_interlock);
		if (error) {
			if ((flags & PGO_LOCKED) == 0)
				rw_exit(vp->v_uobj.vmobjlock);
			return error;
		}
	}

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;

	/*
	 * Update timestamp lazily. The update will be made real when
	 * a synchronous update is next made -- or by tmpfs_getattr,
	 * tmpfs_putpages, and tmpfs_inactive.
	 */
	if ((flags & PGO_NOTIMESTAMP) == 0) {
		u_int tflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			tflags |= TMPFS_UPDATE_ATIME;

		if ((access_type & VM_PROT_WRITE) != 0) {
			tflags |= TMPFS_UPDATE_MTIME;
			if (vp->v_mount->mnt_flag & MNT_RELATIME)
				tflags |= TMPFS_UPDATE_ATIME;
		}
		tmpfs_update_lazily(vp, tflags);
	}

	/* Invoke the pager. The vnode vmobjlock is shared with the UAO. */
	KASSERT(vp->v_uobj.vmobjlock == uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, centeridx,
	    access_type, advice, flags);
#if defined(DEBUG)
	if (!error && pgs) {
		KASSERT(pgs[centeridx] != NULL);
	}
#endif
	return error;
}
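
/*
 * Illustrative sketch, not part of the build: how the lazy timestamp
 * selection in tmpfs_getpages() above plays out per mount flags and
 * fault type:
 *
 *	fault	default		MNT_NOATIME	MNT_NOATIME|MNT_RELATIME
 *	read	ATIME		(none)		(none)
 *	write	ATIME|MTIME	MTIME		MTIME|ATIME
 */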

int
tmpfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;
	vnode_t *vp = ap->a_vp;
	const voff_t offlo = ap->a_offlo;
	const voff_t offhi = ap->a_offhi;
	const int flags = ap->a_flags;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	int error;

	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

	if (vp->v_type != VREG) {
		rw_exit(vp->v_uobj.vmobjlock);
		return 0;
	}

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;

	KASSERT(vp->v_uobj.vmobjlock == uobj->vmobjlock);
	error = (*uobj->pgops->pgo_put)(uobj, offlo, offhi, flags);

	/* XXX mtime */

	/* Process deferred updates. */
	tmpfs_update(vp, 0);
	return error;
}

int
tmpfs_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	const int flags = ap->a_flags;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
	tmpfs_dirent_t *de;
	int error;

	switch (flags) {
	case LOOKUP:
		break;
	case CREATE:
		error = tmpfs_alloc_dirent(tmp, cnp->cn_nameptr,
		    cnp->cn_namelen, &de);
		if (error)
			return error;
		tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
		break;
	case DELETE:
		cnp->cn_flags &= ~DOWHITEOUT; /* when in doubt, cargo cult */
		de = tmpfs_dir_lookup(dnode, cnp);
		if (de == NULL)
			return ENOENT;
		tmpfs_dir_detach(dnode, de);
		tmpfs_free_dirent(tmp, de);
		break;
	}
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
	return 0;
}
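
/*
 * Illustrative sketch, not part of the build: whiteouts matter for
 * union mounts layered over tmpfs, where deleting a lower-layer name
 * must leave a marker in the upper (tmpfs) layer. The a_flags cases
 * above map to:
 *
 *	LOOKUP	probe only; nothing to do
 *	CREATE	install a TMPFS_NODE_WHITEOUT entry for the name
 *	DELETE	remove a previously installed whiteout entry
 */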

int
tmpfs_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n"
	    "\tmode 0%o, owner %d, group %d, size %" PRIdMAX,
	    node, node->tn_flags, node->tn_links, node->tn_mode, node->tn_uid,
	    node->tn_gid, (intmax_t)node->tn_size);
	if (vp->v_type == VFIFO) {
		VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
	}
	printf("\n");
	return 0;
}