/*	$NetBSD: tmpfs_vnops.c,v 1.98 2012/07/22 00:53:21 rmind Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs vnode interface.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vnops.c,v 1.98 2012/07/22 00:53:21 rmind Exp $");

#include <sys/param.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/event.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/kauth.h>

#include <uvm/uvm.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs_vnops.h>
#include <fs/tmpfs/tmpfs.h>

/*
 * vnode operations vector used for files stored in a tmpfs file system.
 */
int (**tmpfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc tmpfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, tmpfs_lookup },
	{ &vop_create_desc, tmpfs_create },
	{ &vop_mknod_desc, tmpfs_mknod },
	{ &vop_open_desc, tmpfs_open },
	{ &vop_close_desc, tmpfs_close },
	{ &vop_access_desc, tmpfs_access },
	{ &vop_getattr_desc, tmpfs_getattr },
	{ &vop_setattr_desc, tmpfs_setattr },
	{ &vop_read_desc, tmpfs_read },
	{ &vop_write_desc, tmpfs_write },
	{ &vop_ioctl_desc, tmpfs_ioctl },
	{ &vop_fcntl_desc, tmpfs_fcntl },
	{ &vop_poll_desc, tmpfs_poll },
	{ &vop_kqfilter_desc, tmpfs_kqfilter },
	{ &vop_revoke_desc, tmpfs_revoke },
	{ &vop_mmap_desc, tmpfs_mmap },
	{ &vop_fsync_desc, tmpfs_fsync },
	{ &vop_seek_desc, tmpfs_seek },
	{ &vop_remove_desc, tmpfs_remove },
	{ &vop_link_desc, tmpfs_link },
	{ &vop_rename_desc, tmpfs_rename },
	{ &vop_mkdir_desc, tmpfs_mkdir },
	{ &vop_rmdir_desc, tmpfs_rmdir },
	{ &vop_symlink_desc, tmpfs_symlink },
	{ &vop_readdir_desc, tmpfs_readdir },
	{ &vop_readlink_desc, tmpfs_readlink },
	{ &vop_abortop_desc, tmpfs_abortop },
	{ &vop_inactive_desc, tmpfs_inactive },
	{ &vop_reclaim_desc, tmpfs_reclaim },
	{ &vop_lock_desc, tmpfs_lock },
	{ &vop_unlock_desc, tmpfs_unlock },
	{ &vop_bmap_desc, tmpfs_bmap },
	{ &vop_strategy_desc, tmpfs_strategy },
	{ &vop_print_desc, tmpfs_print },
	{ &vop_pathconf_desc, tmpfs_pathconf },
	{ &vop_islocked_desc, tmpfs_islocked },
	{ &vop_advlock_desc, tmpfs_advlock },
	{ &vop_bwrite_desc, tmpfs_bwrite },
	{ &vop_getpages_desc, tmpfs_getpages },
	{ &vop_putpages_desc, tmpfs_putpages },
	{ &vop_whiteout_desc, tmpfs_whiteout },
	{ NULL, NULL }
};

const struct vnodeopv_desc tmpfs_vnodeop_opv_desc = {
	&tmpfs_vnodeop_p, tmpfs_vnodeop_entries
};

/*
 * tmpfs_lookup: path name traversal routine.
 *
 * Arguments: dvp (directory being searched), vpp (result),
 * cnp (component name - path).
 *
 * => Caller holds a reference and lock on dvp.
 * => We return looked-up vnode (vpp) locked, with a reference held.
 */
int
tmpfs_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	const bool lastcn = (cnp->cn_flags & ISLASTCN) != 0;
	tmpfs_node_t *dnode, *tnode;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));

	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* Check accessibility of directory. */
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (error) {
		goto out;
	}

	/*
	 * If requesting the last path component on a read-only file system
	 * with a write operation, deny it.
	 */
	if (lastcn && (dvp->v_mount->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto out;
	}

	/*
	 * Avoid doing a linear scan of the directory if the requested
	 * directory/name couple is already in the cache.
	 */
	error = cache_lookup(dvp, vpp, cnp);
	if (error >= 0) {
		/* Either a cache hit or an error case. */
		goto out;
	}

	if (cnp->cn_flags & ISDOTDOT) {
		tmpfs_node_t *pnode;

		/*
		 * Lookup of ".." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EINVAL;
			goto out;
		}
		KASSERT(dnode->tn_type == VDIR);
		pnode = dnode->tn_spec.tn_dir.tn_parent;
		if (pnode == NULL) {
			error = ENOENT;
			goto out;
		}

		/*
		 * Lock the parent tn_vlock before releasing the vnode lock,
		 * thus preventing the parent from disappearing.
		 */
		mutex_enter(&pnode->tn_vlock);
		VOP_UNLOCK(dvp);

		/*
		 * Get a vnode of the '..' entry and re-acquire the lock.
		 * Release the tn_vlock.
		 */
		error = tmpfs_vnode_get(dvp->v_mount, pnode, vpp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		goto out;

	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		/*
		 * Lookup of "." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EISDIR;
			goto out;
		}
		vref(dvp);
		*vpp = dvp;
		error = 0;
		goto done;
	}

	/*
	 * Other lookup cases: perform directory scan.
	 */
	de = tmpfs_dir_lookup(dnode, cnp);
	if (de == NULL || de->td_node == TMPFS_NODE_WHITEOUT) {
		/*
		 * The entry was not found in the directory. This is valid
		 * if we are creating or renaming an entry and are working
		 * on the last component of the path name.
		 */
		if (lastcn && (cnp->cn_nameiop == CREATE ||
		    cnp->cn_nameiop == RENAME)) {
			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
			if (error) {
				goto out;
			}
			error = EJUSTRETURN;
		} else {
			error = ENOENT;
		}
		if (de) {
			KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
			cnp->cn_flags |= ISWHITEOUT;
		}
		goto done;
	}

	tnode = de->td_node;

	/*
	 * If it is not the last path component and we found a non-directory
	 * or non-link entry (which may itself point to a directory),
	 * raise an error.
	 */
	if (!lastcn && tnode->tn_type != VDIR && tnode->tn_type != VLNK) {
		error = ENOTDIR;
		goto out;
	}

	/* Check the permissions. */
	if (lastcn && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (error)
			goto out;

		if ((dnode->tn_mode & S_ISTXT) != 0) {
			error = kauth_authorize_vnode(cnp->cn_cred,
			    KAUTH_VNODE_DELETE, tnode->tn_vnode,
			    dnode->tn_vnode, genfs_can_sticky(cnp->cn_cred,
			    dnode->tn_uid, tnode->tn_uid));
			if (error) {
				error = EPERM;
				goto out;
			}
		}
	}

	/* Get a vnode for the matching entry. */
	mutex_enter(&tnode->tn_vlock);
	error = tmpfs_vnode_get(dvp->v_mount, tnode, vpp);
done:
	/*
	 * Cache the result, unless request was for creation (as it does
	 * not improve the performance).
	 */
	if (cnp->cn_nameiop != CREATE) {
		cache_enter(dvp, *vpp, cnp);
	}
out:
	KASSERT((*vpp && VOP_ISLOCKED(*vpp)) || error);
	KASSERT(VOP_ISLOCKED(dvp));

	return error;
}

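/*
 * tmpfs_create: create a regular file or a socket node.
 */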
int
tmpfs_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
	return tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
}

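/*
 * tmpfs_mknod: create a block or character device node, or a FIFO.
 */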
int
tmpfs_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;
	enum vtype vt = vap->va_type;

	if (vt != VBLK && vt != VCHR && vt != VFIFO) {
		vput(dvp);
		return EINVAL;
	}
	return tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
}

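/*
 * tmpfs_open: check that the node can still be opened: deny opening of
 * unlinked nodes and write access to append-only files.
 */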
int
tmpfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	tmpfs_node_t *node;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	if (node->tn_links < 1) {
		/*
		 * The file is still active, but all its names have been
		 * removed (e.g. by a "rmdir $(pwd)").  It cannot be opened
		 * any more, as it is about to be destroyed.
		 */
		return ENOENT;
	}

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) != 0 &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		return EPERM;
	}
	return 0;
}

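/*
 * tmpfs_close: update the timestamps of the node on close.
 */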
int
tmpfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;

	KASSERT(VOP_ISLOCKED(vp));

	tmpfs_update(vp, NULL, NULL, NULL, UPDATE_CLOSE);
	return 0;
}

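/*
 * tmpfs_access: check access permissions against the node's mode,
 * ownership and flags, and the mount options.
 */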
int
tmpfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	const bool writing = (mode & VWRITE) != 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Possible? */
	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		if (writing && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
			return EROFS;
		}
		break;
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		break;
	default:
		return EINVAL;
	}
	if (writing && (node->tn_flags & IMMUTABLE) != 0) {
		return EPERM;
	}

	return kauth_authorize_vnode(cred, kauth_access_action(mode,
	    vp->v_type, node->tn_mode), vp, NULL, genfs_can_access(vp->v_type,
	    node->tn_mode, node->tn_uid, node->tn_gid, mode, cred));
}

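/*
 * tmpfs_getattr: return the attributes of the node.
 */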
int
tmpfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	vattr_null(vap);

	tmpfs_update(vp, NULL, NULL, NULL, 0);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime = node->tn_atime;
	vap->va_mtime = node->tn_mtime;
	vap->va_ctime = node->tn_ctime;
	vap->va_birthtime = node->tn_birthtime;
	vap->va_gen = TMPFS_NODE_GEN(node);
	vap->va_flags = node->tn_flags;
	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
	    node->tn_spec.tn_dev.tn_rdev : VNOVAL;
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = VNOVAL;
	vap->va_vaflags = 0;
	vap->va_spare = VNOVAL; /* XXX */

	return 0;
}

#define GOODTIME(tv)	((tv)->tv_sec != VNOVAL || (tv)->tv_nsec != VNOVAL)
/* XXX Should this operation be atomic?  I think it should, but code in
 * XXX other places (e.g., ufs) doesn't seem to be... */
int
tmpfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	kauth_cred_t cred = ap->a_cred;
	lwp_t *l = curlwp;
	int error = 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON || vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL || GOODTIME(&vap->va_ctime) ||
	    vap->va_gen != VNOVAL || vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL) {
		return EINVAL;
	}
	if (error == 0 && (vap->va_flags != VNOVAL))
		error = tmpfs_chflags(vp, vap->va_flags, cred, l);

	if (error == 0 && (vap->va_size != VNOVAL))
		error = tmpfs_chsize(vp, vap->va_size, cred, l);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, l);

	if (error == 0 && (vap->va_mode != VNOVAL))
		error = tmpfs_chmod(vp, vap->va_mode, cred, l);

	if (error == 0 && (GOODTIME(&vap->va_atime) || GOODTIME(&vap->va_mtime)
	    || GOODTIME(&vap->va_birthtime))) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
		    &vap->va_birthtime, vap->va_vaflags, cred, l);
		if (error == 0)
			return 0;
	}
	tmpfs_update(vp, NULL, NULL, NULL, 0);
	return error;
}

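/*
 * tmpfs_read: read from a regular file, copying the data via UBC from
 * the backing UVM object.
 */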
int
tmpfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	if (vp->v_type != VREG) {
		return EISDIR;
	}
	if (uio->uio_offset < 0) {
		return EINVAL;
	}

	node = VP_TO_TMPFS_NODE(vp);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;

	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		if (node->tn_size <= uio->uio_offset) {
			break;
		}
		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
	}
	return error;
}

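/*
 * tmpfs_write: write to a regular file.  The file is grown first if the
 * write extends beyond its current size, and shrunk back on error.
 */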
int
tmpfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	off_t oldsize;
	bool extended;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	if (uio->uio_offset < 0 || vp->v_type != VREG) {
		error = EINVAL;
		goto out;
	}
	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}
	if (ioflag & IO_APPEND) {
		uio->uio_offset = node->tn_size;
	}

	extended = uio->uio_offset + uio->uio_resid > node->tn_size;
	if (extended) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid);
		if (error)
			goto out;
	}

	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;
	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    UBC_WRITE | UBC_UNMAP_FLAG(vp));
	}
	if (error) {
		(void)tmpfs_reg_resize(vp, oldsize);
	}

	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
	    (extended ? TMPFS_NODE_CHANGED : 0);
	VN_KNOTE(vp, NOTE_WRITE);
out:
	if (error) {
		KASSERT(oldsize == node->tn_size);
	} else {
		KASSERT(uio->uio_resid == 0);
	}
	return error;
}

int
tmpfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;

	/* Nothing to do.  Just update. */
	KASSERT(VOP_ISLOCKED(vp));
	tmpfs_update(vp, NULL, NULL, NULL, 0);
	return 0;
}

/*
 * tmpfs_remove: unlink a file.
 *
 * => Both directory (dvp) and file (vp) are locked.
 * => We unlock and drop the reference on both.
 */
int
tmpfs_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, *vp = ap->a_vp;
	tmpfs_node_t *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));

	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}
	node = VP_TO_TMPFS_NODE(vp);

	/* Files marked as immutable or append-only cannot be deleted. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/*
	 * Remove the entry from the directory (drops the link count) and
	 * destroy it or replace it with a whiteout.
	 * Note: the inode referred by it will not be destroyed
	 * until the vnode is reclaimed/recycled.
	 */
	tmpfs_dir_detach(dvp, de);
	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dvp, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(VFS_TO_TMPFS(vp->v_mount), de);
	error = 0;
out:
	/* Drop the references and unlock the vnodes. */
	vput(vp);
	if (dvp == vp) {
		vrele(dvp);
	} else {
		vput(dvp);
	}
	return error;
}

/*
 * tmpfs_link: create a hard link.
 */
int
tmpfs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	tmpfs_node_t *dnode, *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(dvp != vp);
	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp->v_type != VDIR);
	KASSERT(dvp->v_mount == vp->v_mount);

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/* Check for maximum number of links limit. */
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	KASSERT(node->tn_links < LINK_MAX);

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the inode. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount),
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error) {
		goto out;
	}

	/*
	 * Insert the entry into the directory.
	 * It will increase the inode link count.
	 */
	tmpfs_dir_attach(dvp, de, node);

	/* Update the timestamps and trigger the event. */
	if (node->tn_vnode) {
		VN_KNOTE(node->tn_vnode, NOTE_LINK);
	}
	node->tn_status |= TMPFS_NODE_CHANGED;
	tmpfs_update(vp, NULL, NULL, NULL, 0);
	error = 0;
out:
	VOP_UNLOCK(vp);
	vput(dvp);
	return error;
}

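/*
 * tmpfs_mkdir: create a directory.
 */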
int
tmpfs_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;

	KASSERT(vap->va_type == VDIR);
	return tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
}

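/*
 * tmpfs_rmdir: remove a directory.  Only directories containing nothing
 * but whiteout entries can be removed.
 */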
int
tmpfs_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
	tmpfs_node_t *node = VP_TO_TMPFS_DIR(vp);
	tmpfs_dirent_t *de;
	int error = 0;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(node->tn_spec.tn_dir.tn_parent == dnode);

	/*
	 * Directories with any non-whiteout entries (other than the
	 * virtual '.' and '..') cannot be removed.
	 */
	if (node->tn_size > 0) {
		KASSERT(error == 0);
		TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
			if (de->td_node != TMPFS_NODE_WHITEOUT) {
				error = ENOTEMPTY;
				break;
			}
		}
		if (error)
			goto out;
	}

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/* Check flags to see if we are allowed to remove the directory. */
	if (dnode->tn_flags & APPEND || node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Decrement the link count for the virtual '.' entry. */
	node->tn_links--;
	node->tn_status |= TMPFS_NODE_STATUSALL;

	/* Detach the directory entry from the directory. */
	tmpfs_dir_detach(dvp, de);

	/* Purge the cache for parent. */
	cache_purge(dvp);

	/*
	 * Destroy the directory entry or replace it with a whiteout.
	 * Note: the inode referred by it will not be destroyed
	 * until the vnode is reclaimed.
	 */
	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dvp, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(tmp, de);

	/* Destroy the whiteout entries from the node. */
	while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
		KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
		tmpfs_dir_detach(vp, de);
		tmpfs_free_dirent(tmp, de);
	}

	KASSERT(node->tn_links == 0);
out:
	/* Release the nodes. */
	vput(dvp);
	vput(vp);
	return error;
}

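/*
 * tmpfs_symlink: create a symbolic link with the given target.
 */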
int
tmpfs_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;
	char *target = ap->a_target;

	KASSERT(vap->va_type == VLNK);
	return tmpfs_alloc_file(dvp, vpp, vap, cnp, target);
}

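/*
 * tmpfs_readdir: return directory entries, including the synthetic '.'
 * and '..' entries, and fill in the NFS cookies if requested.
 */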
int
tmpfs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
	off_t startoff, cnt;
	tmpfs_node_t *node;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;
	cnt = 0;
	if (node->tn_links == 0) {
		error = 0;
		goto out;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0) {
			if (error == -1)
				error = 0;
			goto out;
		}
		cnt++;
	}
	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		error = tmpfs_dir_getdotdotdent(node, uio);
		if (error != 0) {
			if (error == -1)
				error = 0;
			goto out;
		}
		cnt++;
	}
	error = tmpfs_dir_getdents(node, uio, &cnt);
	if (error == -1) {
		error = 0;
	}
	KASSERT(error >= 0);
out:
	if (eofflag != NULL) {
		*eofflag = (!error && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
	}
	if (error || cookies == NULL || ncookies == NULL) {
		return error;
	}

	/* Update NFS-related variables, if any. */
	off_t i, off = startoff;
	tmpfs_dirent_t *de = NULL;

	*cookies = malloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
	*ncookies = cnt;

	for (i = 0; i < cnt; i++) {
		KASSERT(off != TMPFS_DIRCOOKIE_EOF);
		if (off != TMPFS_DIRCOOKIE_DOT) {
			if (off == TMPFS_DIRCOOKIE_DOTDOT) {
				de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
			} else if (de != NULL) {
				de = TAILQ_NEXT(de, td_entries);
			} else {
				de = tmpfs_dir_lookupbycookie(node, off);
				KASSERT(de != NULL);
				de = TAILQ_NEXT(de, td_entries);
			}
			if (de == NULL) {
				off = TMPFS_DIRCOOKIE_EOF;
			} else {
				off = tmpfs_dircookie(de);
			}
		} else {
			off = TMPFS_DIRCOOKIE_DOTDOT;
		}
		(*cookies)[i] = off;
	}
	KASSERT(uio->uio_offset == off);
	return error;
}

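/*
 * tmpfs_readlink: return the target of a symbolic link.
 */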
int
tmpfs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	tmpfs_node_t *node;
	int error;

	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(uio->uio_offset == 0);
	KASSERT(vp->v_type == VLNK);

	node = VP_TO_TMPFS_NODE(vp);
	error = uiomove(node->tn_spec.tn_lnk.tn_link,
	    MIN(node->tn_size, uio->uio_resid), uio);
	node->tn_status |= TMPFS_NODE_ACCESSED;

	return error;
}

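/*
 * tmpfs_inactive: mark the vnode for recycling if the node has no
 * links left.
 */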
int
tmpfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	*ap->a_recycle = (node->tn_links == 0);
	VOP_UNLOCK(vp);

	return 0;
}

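/*
 * tmpfs_reclaim: disassociate the inode from the vnode and destroy the
 * inode if it has no links, unless tmpfs_vnode_get() is racing to attach
 * a new vnode to it.
 */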
int
tmpfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	bool racing;

	/* Disassociate inode from vnode. */
	mutex_enter(&node->tn_vlock);
	node->tn_vnode = NULL;
	vp->v_data = NULL;
	/* Check if tmpfs_vnode_get() is racing with us. */
	racing = TMPFS_NODE_RECLAIMING(node);
	mutex_exit(&node->tn_vlock);

	/*
	 * If inode is not referenced, i.e. no links, then destroy it.
	 * Note: if racing - inode is about to get a new vnode, leave it.
	 */
	if (node->tn_links == 0 && !racing) {
		tmpfs_free_node(tmp, node);
	}
	return 0;
}

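/*
 * tmpfs_pathconf: return the configurable pathname variables.
 */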
int
tmpfs_pathconf(void *v)
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;
	const int name = ap->a_name;
	register_t *retval = ap->a_retval;
	int error = 0;

	switch (name) {
	case _PC_LINK_MAX:
		*retval = LINK_MAX;
		break;
	case _PC_NAME_MAX:
		*retval = TMPFS_MAXNAMLEN;
		break;
	case _PC_PATH_MAX:
		*retval = PATH_MAX;
		break;
	case _PC_PIPE_BUF:
		*retval = PIPE_BUF;
		break;
	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;
	case _PC_NO_TRUNC:
		*retval = 1;
		break;
	case _PC_SYNC_IO:
		*retval = 1;
		break;
	case _PC_FILESIZEBITS:
		*retval = sizeof(off_t) * CHAR_BIT;
		break;
	default:
		error = EINVAL;
	}
	return error;
}

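/*
 * tmpfs_advlock: advisory byte-range locking, delegated to lf_advlock().
 */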
int
tmpfs_advlock(void *v)
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		void * a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	return lf_advlock(v, &node->tn_lockf, node->tn_size);
}

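/*
 * tmpfs_getpages: get pages from the anonymous UVM object backing the
 * regular file, marking the node accessed/modified as appropriate.
 */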
int
tmpfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;
	vnode_t *vp = ap->a_vp;
	const voff_t offset = ap->a_offset;
	struct vm_page **pgs = ap->a_m;
	const int centeridx = ap->a_centeridx;
	const vm_prot_t access_type = ap->a_access_type;
	const int advice = ap->a_advice;
	const int flags = ap->a_flags;
	int error, npages = *ap->a_count;
	tmpfs_node_t *node;
	struct uvm_object *uobj;

	KASSERT(vp->v_type == VREG);
	KASSERT(mutex_owned(vp->v_interlock));

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;

	/*
	 * Currently, PGO_PASTEOF is not supported.
	 */
	if (vp->v_size <= offset + (centeridx << PAGE_SHIFT)) {
		if ((flags & PGO_LOCKED) == 0)
			mutex_exit(vp->v_interlock);
		return EINVAL;
	}

	if (vp->v_size < offset + (npages << PAGE_SHIFT)) {
		npages = (round_page(vp->v_size) - offset) >> PAGE_SHIFT;
	}

	if ((flags & PGO_LOCKED) != 0)
		return EBUSY;

	if ((flags & PGO_NOTIMESTAMP) == 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			node->tn_status |= TMPFS_NODE_ACCESSED;

		if ((access_type & VM_PROT_WRITE) != 0) {
			node->tn_status |= TMPFS_NODE_MODIFIED;
			if (vp->v_mount->mnt_flag & MNT_RELATIME)
				node->tn_status |= TMPFS_NODE_ACCESSED;
		}
	}

	/*
	 * Invoke the pager.
	 *
	 * Clean the array of pages before.  XXX: PR/32166
	 * Note that vnode lock is shared with underlying UVM object.
	 */
	if (pgs) {
		memset(pgs, 0, sizeof(struct vm_page *) * npages);
	}
	KASSERT(vp->v_interlock == uobj->vmobjlock);

	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, centeridx,
	    access_type, advice, flags | PGO_ALLPAGES);

#if defined(DEBUG)
	if (!error && pgs) {
		for (int i = 0; i < npages; i++) {
			KASSERT(pgs[i] != NULL);
		}
	}
#endif
	return error;
}

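/*
 * tmpfs_putpages: flush pages of the backing UVM object; there is
 * nothing to do for non-regular files.
 */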
int
tmpfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;
	vnode_t *vp = ap->a_vp;
	const voff_t offlo = ap->a_offlo;
	const voff_t offhi = ap->a_offhi;
	const int flags = ap->a_flags;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_type != VREG) {
		mutex_exit(vp->v_interlock);
		return 0;
	}

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;

	KASSERT(vp->v_interlock == uobj->vmobjlock);
	error = (*uobj->pgops->pgo_put)(uobj, offlo, offhi, flags);

	/* XXX mtime */

	return error;
}

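/*
 * tmpfs_whiteout: create or remove a whiteout entry in the directory.
 */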
int
tmpfs_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	const int flags = ap->a_flags;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_dirent_t *de;
	int error;

	switch (flags) {
	case LOOKUP:
		break;
	case CREATE:
		error = tmpfs_alloc_dirent(tmp, cnp->cn_nameptr,
		    cnp->cn_namelen, &de);
		if (error)
			return error;
		tmpfs_dir_attach(dvp, de, TMPFS_NODE_WHITEOUT);
		break;
	case DELETE:
		cnp->cn_flags &= ~DOWHITEOUT; /* when in doubt, cargo cult */
		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), cnp);
		if (de == NULL)
			return ENOENT;
		tmpfs_dir_detach(dvp, de);
		tmpfs_free_dirent(tmp, de);
		break;
	}
	return 0;
}

int
tmpfs_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);

	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n"
	    "\tmode 0%o, owner %d, group %d, size %" PRIdMAX ", status 0x%x",
	    node, node->tn_flags, node->tn_links, node->tn_mode, node->tn_uid,
	    node->tn_gid, (uintmax_t)node->tn_size, node->tn_status);
	if (vp->v_type == VFIFO) {
		VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
	}
	printf("\n");
	return 0;
}