tmpfs_subr.c revision 1.102.14.1
      1  1.102.14.1  pgoyette /*	$NetBSD: tmpfs_subr.c,v 1.102.14.1 2018/06/25 07:26:04 pgoyette Exp $	*/
      2         1.1      jmmv 
      3         1.1      jmmv /*
      4        1.83     rmind  * Copyright (c) 2005-2013 The NetBSD Foundation, Inc.
      5         1.1      jmmv  * All rights reserved.
      6         1.1      jmmv  *
      7         1.1      jmmv  * This code is derived from software contributed to The NetBSD Foundation
      8         1.8      jmmv  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
      9        1.71     rmind  * 2005 program, and by Mindaugas Rasiukevicius.
     10         1.1      jmmv  *
     11         1.1      jmmv  * Redistribution and use in source and binary forms, with or without
     12         1.1      jmmv  * modification, are permitted provided that the following conditions
     13         1.1      jmmv  * are met:
     14         1.1      jmmv  * 1. Redistributions of source code must retain the above copyright
     15         1.1      jmmv  *    notice, this list of conditions and the following disclaimer.
     16         1.1      jmmv  * 2. Redistributions in binary form must reproduce the above copyright
     17         1.1      jmmv  *    notice, this list of conditions and the following disclaimer in the
     18         1.1      jmmv  *    documentation and/or other materials provided with the distribution.
     19         1.1      jmmv  *
     20         1.1      jmmv  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21         1.1      jmmv  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22         1.1      jmmv  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23         1.1      jmmv  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24         1.1      jmmv  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25         1.1      jmmv  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26         1.1      jmmv  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27         1.1      jmmv  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28         1.1      jmmv  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29         1.1      jmmv  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30         1.1      jmmv  * POSSIBILITY OF SUCH DAMAGE.
     31         1.1      jmmv  */
     32         1.1      jmmv 
     33         1.1      jmmv /*
     34        1.71     rmind  * Efficient memory file system: interfaces for inode and directory entry
     35        1.71     rmind  * construction, destruction and manipulation.
     36        1.71     rmind  *
     37        1.71     rmind  * Reference counting
     38        1.71     rmind  *
      39        1.71     rmind  *	The link count of an inode (tmpfs_node_t::tn_links) is used as a
     40        1.71     rmind  *	reference counter.  However, it has slightly different semantics.
     41        1.71     rmind  *
      42        1.71     rmind  *	For directories, the link count represents the directory entries
      43        1.71     rmind  *	which refer to the directory.  In other words, it represents the
      44        1.71     rmind  *	count of sub-directories.  It also accounts for the virtual '.'
      45        1.71     rmind  *	entry (which has no real entry in the list).  For files, the link
      46        1.71     rmind  *	count represents the hard links.  Since only empty directories can
      47        1.71     rmind  *	be removed, the link count is sufficient for reference counting
      48        1.71     rmind  *	purposes.  Note: to check whether a directory is empty, the inode
      49        1.71     rmind  *	size (tmpfs_node_t::tn_size) can be used.
     50        1.71     rmind  *
     51        1.71     rmind  *	The inode itself, as an object, gathers its first reference when
      52        1.71     rmind  *	a directory entry is attached via tmpfs_dir_attach(9).  For instance,
     53        1.71     rmind  *	after regular tmpfs_create(), a file would have a link count of 1,
      54        1.71     rmind  *	while a directory after tmpfs_mkdir() would have 2 (due to '.').
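                                *	As an illustration (derived from the rules above, rather than any
                                *	single code path): creating a sub-directory also bumps the parent
                                *	directory's link count, so a directory holding one sub-directory
                                *	ends up with 3 links: '.', its entry in the parent and the child.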
     55        1.71     rmind  *
     56        1.71     rmind  * Reclamation
     57        1.71     rmind  *
     58        1.71     rmind  *	It should be noted that tmpfs inodes rely on a combination of vnode
     59        1.71     rmind  *	reference counting and link counting.  That is, an inode can only be
     60        1.71     rmind  *	destroyed if its associated vnode is inactive.  The destruction is
     61        1.71     rmind  *	done on vnode reclamation i.e. tmpfs_reclaim().  It should be noted
     62        1.90     rmind  *	that tmpfs_node_t::tn_links being 0 is a destruction criterion.
     63        1.71     rmind  *
     64        1.71     rmind  *	If an inode has references within the file system (tn_links > 0) and
      65        1.71     rmind  *	its inactive vnode gets reclaimed/recycled, then the association is
      66        1.71     rmind  *	broken in tmpfs_reclaim().  In such a case, the inode will always pass
     67        1.99   hannken  *	tmpfs_lookup() and thus vcache_get() to associate a new vnode.
     68        1.71     rmind  *
     69        1.71     rmind  * Lock order
     70        1.71     rmind  *
     71        1.99   hannken  *	vnode_t::v_vlock ->
     72        1.99   hannken  *		vnode_t::v_interlock
     73         1.1      jmmv  */
     74         1.1      jmmv 
     75         1.1      jmmv #include <sys/cdefs.h>
     76  1.102.14.1  pgoyette __KERNEL_RCSID(0, "$NetBSD: tmpfs_subr.c,v 1.102.14.1 2018/06/25 07:26:04 pgoyette Exp $");
     77         1.1      jmmv 
     78         1.1      jmmv #include <sys/param.h>
     79        1.86     rmind #include <sys/cprng.h>
     80         1.1      jmmv #include <sys/dirent.h>
     81         1.1      jmmv #include <sys/event.h>
     82        1.43        ad #include <sys/kmem.h>
     83         1.1      jmmv #include <sys/mount.h>
     84         1.1      jmmv #include <sys/namei.h>
     85         1.1      jmmv #include <sys/time.h>
     86         1.1      jmmv #include <sys/stat.h>
     87         1.1      jmmv #include <sys/systm.h>
     88         1.1      jmmv #include <sys/vnode.h>
     89        1.20  christos #include <sys/kauth.h>
     90        1.43        ad #include <sys/atomic.h>
     91         1.1      jmmv 
     92         1.1      jmmv #include <uvm/uvm.h>
     93         1.1      jmmv 
     94         1.1      jmmv #include <miscfs/specfs/specdev.h>
     95        1.53      elad #include <miscfs/genfs/genfs.h>
     96         1.1      jmmv #include <fs/tmpfs/tmpfs.h>
     97         1.1      jmmv #include <fs/tmpfs/tmpfs_fifoops.h>
     98         1.1      jmmv #include <fs/tmpfs/tmpfs_specops.h>
     99         1.1      jmmv #include <fs/tmpfs/tmpfs_vnops.h>
    100         1.1      jmmv 
    101        1.83     rmind static void	tmpfs_dir_putseq(tmpfs_node_t *, tmpfs_dirent_t *);
    102        1.83     rmind 
    103         1.8      jmmv /*
    104        1.99   hannken  * Initialize vnode with tmpfs node.
    105        1.99   hannken  */
    106        1.99   hannken static void
    107        1.99   hannken tmpfs_init_vnode(struct vnode *vp, tmpfs_node_t *node)
    108        1.99   hannken {
    109        1.99   hannken 	kmutex_t *slock;
    110        1.99   hannken 
    111        1.99   hannken 	KASSERT(node->tn_vnode == NULL);
    112        1.99   hannken 
    113        1.99   hannken 	/* Share the interlock with the node. */
    114        1.99   hannken 	if (node->tn_type == VREG) {
    115        1.99   hannken 		slock = node->tn_spec.tn_reg.tn_aobj->vmobjlock;
    116        1.99   hannken 		mutex_obj_hold(slock);
    117        1.99   hannken 		uvm_obj_setlock(&vp->v_uobj, slock);
    118        1.99   hannken 	}
    119        1.99   hannken 
    120        1.99   hannken 	vp->v_tag = VT_TMPFS;
    121        1.99   hannken 	vp->v_type = node->tn_type;
    122        1.99   hannken 
    123        1.99   hannken 	/* Type-specific initialization. */
    124        1.99   hannken 	switch (vp->v_type) {
    125        1.99   hannken 	case VBLK:
    126        1.99   hannken 	case VCHR:
    127        1.99   hannken 		vp->v_op = tmpfs_specop_p;
    128        1.99   hannken 		spec_node_init(vp, node->tn_spec.tn_dev.tn_rdev);
    129        1.99   hannken 		break;
    130        1.99   hannken 	case VFIFO:
    131        1.99   hannken 		vp->v_op = tmpfs_fifoop_p;
    132        1.99   hannken 		break;
    133        1.99   hannken 	case VDIR:
    134        1.99   hannken 		if (node->tn_spec.tn_dir.tn_parent == node)
    135        1.99   hannken 			vp->v_vflag |= VV_ROOT;
    136        1.99   hannken 		/* FALLTHROUGH */
    137        1.99   hannken 	case VLNK:
    138        1.99   hannken 	case VREG:
    139        1.99   hannken 	case VSOCK:
    140        1.99   hannken 		vp->v_op = tmpfs_vnodeop_p;
    141        1.99   hannken 		break;
    142        1.99   hannken 	default:
    143        1.99   hannken 		panic("bad node type %d", vp->v_type);
    144        1.99   hannken 		break;
    145        1.99   hannken 	}
    146        1.99   hannken 
    147        1.99   hannken 	vp->v_data = node;
    148        1.99   hannken 	node->tn_vnode = vp;
    149        1.99   hannken 	uvm_vnp_setsize(vp, node->tn_size);
    150        1.99   hannken }
    151        1.99   hannken 
    152        1.99   hannken /*
    153        1.99   hannken  * tmpfs_loadvnode: initialise a vnode for a specified inode.
    154         1.8      jmmv  */
    155         1.1      jmmv int
    156        1.99   hannken tmpfs_loadvnode(struct mount *mp, struct vnode *vp,
    157        1.99   hannken     const void *key, size_t key_len, const void **new_key)
    158         1.1      jmmv {
    159        1.99   hannken 	tmpfs_node_t *node;
    160        1.99   hannken 
    161        1.99   hannken 	KASSERT(key_len == sizeof(node));
    162        1.99   hannken 	memcpy(&node, key, key_len);
    163        1.99   hannken 
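                               	/* An inode with zero links is being destroyed; refuse to load it. */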
    164        1.99   hannken 	if (node->tn_links == 0)
    165        1.99   hannken 		return ENOENT;
    166        1.99   hannken 
    167        1.99   hannken 	tmpfs_init_vnode(vp, node);
    168        1.99   hannken 
    169        1.99   hannken 	*new_key = &vp->v_data;
    170        1.99   hannken 
    171        1.99   hannken 	return 0;
    172        1.99   hannken }
    173         1.1      jmmv 
    174        1.99   hannken /*
    175        1.99   hannken  * tmpfs_newvnode: allocate a new inode of a specified type and
     176        1.99   hannken  * attach the vnode.
    177        1.99   hannken  */
    178        1.99   hannken int
    179        1.99   hannken tmpfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
    180        1.99   hannken     struct vattr *vap, kauth_cred_t cred,
    181        1.99   hannken     size_t *key_len, const void **new_key)
    182        1.99   hannken {
    183        1.99   hannken 	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
    184        1.99   hannken 	tmpfs_node_t *node, *dnode;
    185        1.99   hannken 
    186        1.99   hannken 	if (dvp != NULL) {
    187        1.99   hannken 		KASSERT(VOP_ISLOCKED(dvp));
    188        1.99   hannken 		dnode = VP_TO_TMPFS_DIR(dvp);
    189        1.99   hannken 		if (dnode->tn_links == 0)
    190        1.99   hannken 			return ENOENT;
    191        1.99   hannken 		if (vap->va_type == VDIR) {
    192        1.99   hannken 			/* Check for maximum links limit. */
    193        1.99   hannken 			if (dnode->tn_links == LINK_MAX)
    194        1.99   hannken 				return EMLINK;
    195        1.99   hannken 			KASSERT(dnode->tn_links < LINK_MAX);
    196        1.99   hannken 		}
    197        1.99   hannken 	} else
    198        1.99   hannken 		dnode = NULL;
    199        1.99   hannken 
    200        1.99   hannken 	node = tmpfs_node_get(tmp);
    201        1.99   hannken 	if (node == NULL)
    202        1.43        ad 		return ENOSPC;
    203        1.43        ad 
    204        1.71     rmind 	/* Initially, no references and no associations. */
    205        1.99   hannken 	node->tn_links = 0;
    206        1.99   hannken 	node->tn_vnode = NULL;
    207        1.99   hannken 	node->tn_holdcount = 0;
    208        1.99   hannken 	node->tn_dirent_hint = NULL;
    209        1.71     rmind 
    210        1.43        ad 	/*
    211        1.43        ad 	 * XXX Where the pool is backed by a map larger than (4GB *
    212        1.99   hannken 	 * sizeof(*node)), this may produce duplicate inode numbers
    213        1.43        ad 	 * for applications that do not understand 64-bit ino_t.
    214        1.43        ad 	 */
    215        1.99   hannken 	node->tn_id = (ino_t)((uintptr_t)node / sizeof(*node));
    216        1.93   hannken 	/*
    217        1.93   hannken 	 * Make sure the generation number is not zero.
    218        1.93   hannken 	 * tmpfs_inactive() uses generation zero to mark dead nodes.
    219        1.93   hannken 	 */
    220        1.93   hannken 	do {
    221        1.99   hannken 		node->tn_gen = TMPFS_NODE_GEN_MASK & cprng_fast32();
    222        1.99   hannken 	} while (node->tn_gen == 0);
    223         1.1      jmmv 
    224         1.1      jmmv 	/* Generic initialization. */
    225       1.100    justin 	KASSERT((int)vap->va_type != VNOVAL);
    226        1.99   hannken 	node->tn_type = vap->va_type;
    227        1.99   hannken 	node->tn_size = 0;
    228        1.99   hannken 	node->tn_flags = 0;
    229        1.99   hannken 	node->tn_lockf = NULL;
    230        1.99   hannken 
    231        1.99   hannken 	vfs_timestamp(&node->tn_atime);
    232        1.99   hannken 	node->tn_birthtime = node->tn_atime;
    233        1.99   hannken 	node->tn_ctime = node->tn_atime;
    234        1.99   hannken 	node->tn_mtime = node->tn_atime;
    235        1.99   hannken 
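                               	/* No parent directory: this is the root inode of the mount. */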
    236        1.99   hannken 	if (dvp == NULL) {
    237        1.99   hannken 		KASSERT(vap->va_uid != VNOVAL && vap->va_gid != VNOVAL);
    238        1.99   hannken 		node->tn_uid = vap->va_uid;
    239        1.99   hannken 		node->tn_gid = vap->va_gid;
    240        1.99   hannken 		vp->v_vflag |= VV_ROOT;
    241        1.99   hannken 	} else {
    242        1.99   hannken 		KASSERT(dnode != NULL);
    243        1.99   hannken 		node->tn_uid = kauth_cred_geteuid(cred);
    244        1.99   hannken 		node->tn_gid = dnode->tn_gid;
    245        1.99   hannken 	}
    246        1.99   hannken 	KASSERT(vap->va_mode != VNOVAL);
    247        1.99   hannken 	node->tn_mode = vap->va_mode;
    248         1.1      jmmv 
    249         1.1      jmmv 	/* Type-specific initialization. */
    250        1.99   hannken 	switch (node->tn_type) {
    251         1.1      jmmv 	case VBLK:
    252         1.1      jmmv 	case VCHR:
    253        1.65     rmind 		/* Character/block special device. */
    254        1.99   hannken 		KASSERT(vap->va_rdev != VNOVAL);
    255        1.99   hannken 		node->tn_spec.tn_dev.tn_rdev = vap->va_rdev;
    256         1.1      jmmv 		break;
    257        1.65     rmind 	case VDIR:
    258        1.71     rmind 		/* Directory. */
    259        1.99   hannken 		TAILQ_INIT(&node->tn_spec.tn_dir.tn_dir);
    260        1.99   hannken 		node->tn_spec.tn_dir.tn_parent = NULL;
    261        1.99   hannken 		node->tn_spec.tn_dir.tn_seq_arena = NULL;
    262        1.99   hannken 		node->tn_spec.tn_dir.tn_next_seq = TMPFS_DIRSEQ_START;
    263        1.99   hannken 		node->tn_spec.tn_dir.tn_readdir_lastp = NULL;
    264        1.71     rmind 
    265        1.71     rmind 		/* Extra link count for the virtual '.' entry. */
    266        1.99   hannken 		node->tn_links++;
    267         1.1      jmmv 		break;
    268         1.1      jmmv 	case VFIFO:
    269         1.1      jmmv 	case VSOCK:
    270         1.1      jmmv 		break;
    271        1.65     rmind 	case VLNK:
    272        1.99   hannken 		node->tn_size = 0;
    273        1.99   hannken 		node->tn_spec.tn_lnk.tn_link = NULL;
    274         1.1      jmmv 		break;
    275         1.1      jmmv 	case VREG:
    276        1.65     rmind 		/* Regular file.  Create an underlying UVM object. */
    277        1.99   hannken 		node->tn_spec.tn_reg.tn_aobj =
    278  1.102.14.1  pgoyette 		    uao_create(INT64_MAX - PAGE_SIZE, 0);
    279        1.99   hannken 		node->tn_spec.tn_reg.tn_aobj_pages = 0;
    280         1.1      jmmv 		break;
    281         1.1      jmmv 	default:
     282        1.99   hannken 		panic("bad node type %d", node->tn_type);
    283        1.99   hannken 		break;
    284         1.1      jmmv 	}
    285         1.1      jmmv 
    286        1.99   hannken 	tmpfs_init_vnode(vp, node);
    287        1.43        ad 
    288        1.43        ad 	mutex_enter(&tmp->tm_lock);
    289        1.99   hannken 	LIST_INSERT_HEAD(&tmp->tm_nodes, node, tn_entries);
    290        1.43        ad 	mutex_exit(&tmp->tm_lock);
    291        1.43        ad 
    292        1.99   hannken 	*key_len = sizeof(vp->v_data);
    293        1.99   hannken 	*new_key = &vp->v_data;
    294        1.99   hannken 
    295         1.1      jmmv 	return 0;
    296         1.1      jmmv }
    297         1.1      jmmv 
    298         1.8      jmmv /*
    299        1.65     rmind  * tmpfs_free_node: remove the inode from a list in the mount point and
    300        1.65     rmind  * destroy the inode structures.
    301         1.8      jmmv  */
    302         1.1      jmmv void
    303        1.67     rmind tmpfs_free_node(tmpfs_mount_t *tmp, tmpfs_node_t *node)
    304         1.1      jmmv {
    305        1.57     rmind 	size_t objsz;
    306        1.99   hannken 	uint32_t hold;
    307        1.43        ad 
    308        1.43        ad 	mutex_enter(&tmp->tm_lock);
    309        1.99   hannken 	hold = atomic_or_32_nv(&node->tn_holdcount, TMPFS_NODE_RECLAIMED);
    310        1.99   hannken 	/* Defer destruction to last thread holding this node. */
    311        1.99   hannken 	if (hold != TMPFS_NODE_RECLAIMED) {
    312        1.99   hannken 		mutex_exit(&tmp->tm_lock);
    313        1.99   hannken 		return;
    314        1.99   hannken 	}
    315        1.43        ad 	LIST_REMOVE(node, tn_entries);
    316        1.43        ad 	mutex_exit(&tmp->tm_lock);
    317         1.1      jmmv 
    318        1.40        ad 	switch (node->tn_type) {
    319         1.1      jmmv 	case VLNK:
    320        1.65     rmind 		if (node->tn_size > 0) {
    321        1.63   hannken 			tmpfs_strname_free(tmp, node->tn_spec.tn_lnk.tn_link,
    322        1.63   hannken 			    node->tn_size);
    323        1.65     rmind 		}
    324         1.1      jmmv 		break;
    325         1.1      jmmv 	case VREG:
    326        1.57     rmind 		/*
    327        1.65     rmind 		 * Calculate the size of inode data, decrease the used-memory
     328        1.65     rmind 		 * counter, and destroy the underlying UVM object (if any).
    329        1.57     rmind 		 */
    330        1.57     rmind 		objsz = PAGE_SIZE * node->tn_spec.tn_reg.tn_aobj_pages;
    331        1.57     rmind 		if (objsz != 0) {
    332        1.57     rmind 			tmpfs_mem_decr(tmp, objsz);
    333        1.57     rmind 		}
    334        1.57     rmind 		if (node->tn_spec.tn_reg.tn_aobj != NULL) {
    335        1.18      jmmv 			uao_detach(node->tn_spec.tn_reg.tn_aobj);
    336        1.57     rmind 		}
    337         1.1      jmmv 		break;
    338        1.65     rmind 	case VDIR:
    339        1.90     rmind 		KASSERT(node->tn_size == 0);
    340        1.83     rmind 		KASSERT(node->tn_spec.tn_dir.tn_seq_arena == NULL);
    341        1.83     rmind 		KASSERT(TAILQ_EMPTY(&node->tn_spec.tn_dir.tn_dir));
    342        1.83     rmind 		KASSERT(node->tn_spec.tn_dir.tn_parent == NULL ||
    343        1.83     rmind 		    node == tmp->tm_root);
    344        1.65     rmind 		break;
    345         1.1      jmmv 	default:
    346         1.1      jmmv 		break;
    347         1.1      jmmv 	}
    348        1.93   hannken 	KASSERT(node->tn_vnode == NULL);
    349        1.90     rmind 	KASSERT(node->tn_links == 0);
    350         1.1      jmmv 
    351        1.57     rmind 	tmpfs_node_put(tmp, node);
    352         1.1      jmmv }
    353         1.1      jmmv 
    354         1.8      jmmv /*
     355        1.90     rmind  * tmpfs_construct_node: allocate a new file of the specified type and
     356        1.67     rmind  * add it into the parent directory.
    357        1.67     rmind  *
    358        1.67     rmind  * => Credentials of the caller are used.
    359         1.9      jmmv  */
    360         1.1      jmmv int
    361        1.90     rmind tmpfs_construct_node(vnode_t *dvp, vnode_t **vpp, struct vattr *vap,
    362         1.1      jmmv     struct componentname *cnp, char *target)
    363         1.1      jmmv {
    364        1.67     rmind 	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
    365        1.71     rmind 	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp), *node;
    366        1.77   hannken 	tmpfs_dirent_t *de, *wde;
    367        1.99   hannken 	char *slink = NULL;
    368        1.99   hannken 	int ssize = 0;
    369         1.1      jmmv 	int error;
    370         1.1      jmmv 
    371        1.99   hannken 	/* Allocate symlink target. */
    372        1.99   hannken 	if (target != NULL) {
    373        1.99   hannken 		KASSERT(vap->va_type == VLNK);
    374        1.99   hannken 		ssize = strlen(target);
    375        1.99   hannken 		KASSERT(ssize < MAXPATHLEN);
    376        1.99   hannken 		if (ssize > 0) {
    377        1.99   hannken 			slink = tmpfs_strname_alloc(tmp, ssize);
    378        1.99   hannken 			if (slink == NULL)
    379        1.99   hannken 				return ENOSPC;
    380        1.99   hannken 			memcpy(slink, target, ssize);
    381         1.1      jmmv 		}
    382        1.67     rmind 	}
    383         1.1      jmmv 
    384         1.1      jmmv 	/* Allocate a directory entry that points to the new file. */
    385        1.71     rmind 	error = tmpfs_alloc_dirent(tmp, cnp->cn_nameptr, cnp->cn_namelen, &de);
    386        1.67     rmind 	if (error) {
    387        1.99   hannken 		if (slink != NULL)
    388        1.99   hannken 			tmpfs_strname_free(tmp, slink, ssize);
    389        1.99   hannken 		return error;
    390         1.1      jmmv 	}
    391         1.1      jmmv 
    392        1.99   hannken 	/* Allocate a vnode that represents the new file. */
    393        1.99   hannken 	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, vpp);
    394        1.67     rmind 	if (error) {
    395        1.99   hannken 		if (slink != NULL)
    396        1.99   hannken 			tmpfs_strname_free(tmp, slink, ssize);
    397        1.71     rmind 		tmpfs_free_dirent(tmp, de);
    398        1.99   hannken 		return error;
    399        1.99   hannken 	}
    400        1.99   hannken 	error = vn_lock(*vpp, LK_EXCLUSIVE);
    401        1.99   hannken 	if (error) {
    402        1.99   hannken 		vrele(*vpp);
    403        1.99   hannken 		*vpp = NULL;
    404        1.99   hannken 		if (slink != NULL)
    405        1.99   hannken 			tmpfs_strname_free(tmp, slink, ssize);
    406        1.99   hannken 		tmpfs_free_dirent(tmp, de);
    407        1.99   hannken 		return error;
    408        1.99   hannken 	}
    409        1.99   hannken 
    410        1.99   hannken 	node = VP_TO_TMPFS_NODE(*vpp);
    411        1.99   hannken 
    412        1.99   hannken 	if (slink != NULL) {
    413        1.99   hannken 		node->tn_spec.tn_lnk.tn_link = slink;
    414        1.99   hannken 		node->tn_size = ssize;
    415         1.1      jmmv 	}
    416         1.1      jmmv 
    417        1.77   hannken 	/* Remove whiteout before adding the new entry. */
    418        1.77   hannken 	if (cnp->cn_flags & ISWHITEOUT) {
    419        1.77   hannken 		wde = tmpfs_dir_lookup(dnode, cnp);
    420        1.77   hannken 		KASSERT(wde != NULL && wde->td_node == TMPFS_NODE_WHITEOUT);
    421        1.83     rmind 		tmpfs_dir_detach(dnode, wde);
    422        1.77   hannken 		tmpfs_free_dirent(tmp, wde);
    423        1.77   hannken 	}
    424        1.77   hannken 
    425        1.71     rmind 	/* Associate inode and attach the entry into the directory. */
    426        1.83     rmind 	tmpfs_dir_attach(dnode, de, node);
    427        1.77   hannken 
    428        1.77   hannken 	/* Make node opaque if requested. */
    429        1.77   hannken 	if (cnp->cn_flags & ISWHITEOUT)
    430        1.77   hannken 		node->tn_flags |= UF_OPAQUE;
    431        1.90     rmind 
    432        1.90     rmind 	/* Update the parent's timestamps. */
    433        1.90     rmind 	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
    434        1.96   hannken 
    435        1.99   hannken 	VOP_UNLOCK(*vpp);
    436        1.99   hannken 
    437        1.99   hannken 	return 0;
    438         1.1      jmmv }
    439         1.1      jmmv 
    440         1.8      jmmv /*
    441        1.68     rmind  * tmpfs_alloc_dirent: allocates a new directory entry for the inode.
    442        1.71     rmind  * The directory entry contains a path name component.
    443        1.68     rmind  */
    444        1.68     rmind int
    445        1.71     rmind tmpfs_alloc_dirent(tmpfs_mount_t *tmp, const char *name, uint16_t len,
    446        1.71     rmind     tmpfs_dirent_t **de)
    447        1.68     rmind {
    448        1.68     rmind 	tmpfs_dirent_t *nde;
    449        1.68     rmind 
    450        1.68     rmind 	nde = tmpfs_dirent_get(tmp);
    451        1.68     rmind 	if (nde == NULL)
    452        1.68     rmind 		return ENOSPC;
    453        1.68     rmind 
    454        1.68     rmind 	nde->td_name = tmpfs_strname_alloc(tmp, len);
    455        1.68     rmind 	if (nde->td_name == NULL) {
    456        1.68     rmind 		tmpfs_dirent_put(tmp, nde);
    457        1.68     rmind 		return ENOSPC;
    458        1.68     rmind 	}
    459        1.68     rmind 	nde->td_namelen = len;
    460        1.68     rmind 	memcpy(nde->td_name, name, len);
    461        1.83     rmind 	nde->td_seq = TMPFS_DIRSEQ_NONE;
    462       1.101      leot 	nde->td_node = NULL; /* for asserts */
    463        1.68     rmind 
    464        1.68     rmind 	*de = nde;
    465        1.68     rmind 	return 0;
    466        1.68     rmind }
    467        1.68     rmind 
    468        1.68     rmind /*
    469        1.68     rmind  * tmpfs_free_dirent: free a directory entry.
    470        1.68     rmind  */
    471        1.68     rmind void
    472        1.71     rmind tmpfs_free_dirent(tmpfs_mount_t *tmp, tmpfs_dirent_t *de)
    473        1.68     rmind {
    474        1.83     rmind 	KASSERT(de->td_node == NULL);
    475        1.83     rmind 	KASSERT(de->td_seq == TMPFS_DIRSEQ_NONE);
    476        1.68     rmind 	tmpfs_strname_free(tmp, de->td_name, de->td_namelen);
    477        1.68     rmind 	tmpfs_dirent_put(tmp, de);
    478        1.68     rmind }
    479        1.68     rmind 
    480        1.68     rmind /*
     481        1.71     rmind  * tmpfs_dir_attach: associate a directory entry with the specified inode,
     482        1.71     rmind  * and attach the entry to the given directory.
    483        1.29      jmmv  *
    484        1.71     rmind  * => Increases link count on the associated node.
    485        1.90     rmind  * => Increases link count on directory node if our node is VDIR.
     486        1.90     rmind  * => It is the caller's responsibility to check for the LINK_MAX limit.
    487        1.71     rmind  * => Triggers kqueue events here.
    488         1.8      jmmv  */
    489         1.1      jmmv void
    490        1.83     rmind tmpfs_dir_attach(tmpfs_node_t *dnode, tmpfs_dirent_t *de, tmpfs_node_t *node)
    491         1.1      jmmv {
    492        1.83     rmind 	vnode_t *dvp = dnode->tn_vnode;
    493        1.71     rmind 	int events = NOTE_WRITE;
    494        1.71     rmind 
    495        1.83     rmind 	KASSERT(dvp != NULL);
    496        1.71     rmind 	KASSERT(VOP_ISLOCKED(dvp));
    497        1.71     rmind 
    498        1.83     rmind 	/* Get a new sequence number. */
    499        1.83     rmind 	KASSERT(de->td_seq == TMPFS_DIRSEQ_NONE);
    500        1.83     rmind 	de->td_seq = tmpfs_dir_getseq(dnode, de);
    501        1.83     rmind 
    502        1.71     rmind 	/* Associate directory entry and the inode. */
    503        1.77   hannken 	de->td_node = node;
    504        1.71     rmind 	if (node != TMPFS_NODE_WHITEOUT) {
    505        1.71     rmind 		KASSERT(node->tn_links < LINK_MAX);
    506        1.71     rmind 		node->tn_links++;
    507         1.1      jmmv 
    508        1.71     rmind 		/* Save the hint (might overwrite). */
    509        1.71     rmind 		node->tn_dirent_hint = de;
    510        1.90     rmind 	} else if ((dnode->tn_gen & TMPFS_WHITEOUT_BIT) == 0) {
    511        1.90     rmind 		/* Flag that there are whiteout entries. */
    512        1.90     rmind 		atomic_or_32(&dnode->tn_gen, TMPFS_WHITEOUT_BIT);
    513        1.71     rmind 	}
    514         1.1      jmmv 
    515        1.71     rmind 	/* Insert the entry to the directory (parent of inode). */
    516        1.18      jmmv 	TAILQ_INSERT_TAIL(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);
    517        1.67     rmind 	dnode->tn_size += sizeof(tmpfs_dirent_t);
    518        1.71     rmind 	uvm_vnp_setsize(dvp, dnode->tn_size);
    519        1.71     rmind 
    520        1.71     rmind 	if (node != TMPFS_NODE_WHITEOUT && node->tn_type == VDIR) {
    521        1.71     rmind 		/* Set parent. */
    522        1.71     rmind 		KASSERT(node->tn_spec.tn_dir.tn_parent == NULL);
    523        1.71     rmind 		node->tn_spec.tn_dir.tn_parent = dnode;
    524        1.71     rmind 
    525        1.71     rmind 		/* Increase the link count of parent. */
    526        1.71     rmind 		KASSERT(dnode->tn_links < LINK_MAX);
    527        1.71     rmind 		dnode->tn_links++;
    528        1.71     rmind 		events |= NOTE_LINK;
    529        1.71     rmind 
    530        1.71     rmind 		TMPFS_VALIDATE_DIR(node);
    531        1.71     rmind 	}
    532        1.71     rmind 	VN_KNOTE(dvp, events);
    533         1.1      jmmv }
    534         1.1      jmmv 
    535         1.8      jmmv /*
     536        1.71     rmind  * tmpfs_dir_detach: disassociate a directory entry from its inode,
     537        1.71     rmind  * and detach the entry from the given directory.
    538        1.29      jmmv  *
    539        1.71     rmind  * => Decreases link count on the associated node.
    540        1.71     rmind  * => Decreases the link count on directory node, if our node is VDIR.
    541        1.71     rmind  * => Triggers kqueue events here.
    542        1.83     rmind  *
    543        1.83     rmind  * => Note: dvp and vp may be NULL only if called by tmpfs_unmount().
    544         1.8      jmmv  */
    545         1.1      jmmv void
    546        1.83     rmind tmpfs_dir_detach(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
    547         1.1      jmmv {
    548        1.71     rmind 	tmpfs_node_t *node = de->td_node;
    549        1.83     rmind 	vnode_t *vp, *dvp = dnode->tn_vnode;
    550        1.71     rmind 	int events = NOTE_WRITE;
    551        1.71     rmind 
    552        1.83     rmind 	KASSERT(dvp == NULL || VOP_ISLOCKED(dvp));
    553        1.71     rmind 
    554        1.83     rmind 	if (__predict_true(node != TMPFS_NODE_WHITEOUT)) {
    555        1.71     rmind 		/* Deassociate the inode and entry. */
    556        1.71     rmind 		node->tn_dirent_hint = NULL;
    557         1.1      jmmv 
    558        1.71     rmind 		KASSERT(node->tn_links > 0);
    559        1.71     rmind 		node->tn_links--;
    560        1.83     rmind 
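                               		/* Post NOTE_DELETE if the last link is gone, otherwise NOTE_LINK. */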
    561        1.83     rmind 		if ((vp = node->tn_vnode) != NULL) {
    562        1.83     rmind 			KASSERT(VOP_ISLOCKED(vp));
    563        1.83     rmind 			VN_KNOTE(vp, node->tn_links ? NOTE_LINK : NOTE_DELETE);
    564        1.83     rmind 		}
    565        1.71     rmind 
    566        1.71     rmind 		/* If directory - decrease the link count of parent. */
    567        1.71     rmind 		if (node->tn_type == VDIR) {
    568        1.71     rmind 			KASSERT(node->tn_spec.tn_dir.tn_parent == dnode);
    569        1.71     rmind 			node->tn_spec.tn_dir.tn_parent = NULL;
    570        1.71     rmind 
    571        1.71     rmind 			KASSERT(dnode->tn_links > 0);
    572        1.71     rmind 			dnode->tn_links--;
    573        1.71     rmind 			events |= NOTE_LINK;
    574        1.71     rmind 		}
    575        1.71     rmind 	}
    576        1.85     rmind 	de->td_node = NULL;
    577         1.1      jmmv 
    578        1.71     rmind 	/* Remove the entry from the directory. */
    579        1.18      jmmv 	if (dnode->tn_spec.tn_dir.tn_readdir_lastp == de) {
    580        1.18      jmmv 		dnode->tn_spec.tn_dir.tn_readdir_lastp = NULL;
    581         1.5      yamt 	}
    582        1.67     rmind 	TAILQ_REMOVE(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);
    583        1.67     rmind 	dnode->tn_size -= sizeof(tmpfs_dirent_t);
    584        1.83     rmind 	tmpfs_dir_putseq(dnode, de);
    585        1.83     rmind 
    586        1.83     rmind 	if (dvp) {
    587        1.83     rmind 		uvm_vnp_setsize(dvp, dnode->tn_size);
    588        1.83     rmind 		VN_KNOTE(dvp, events);
    589        1.83     rmind 	}
    590         1.1      jmmv }
    591         1.1      jmmv 
    592         1.8      jmmv /*
    593        1.67     rmind  * tmpfs_dir_lookup: find a directory entry in the specified inode.
    594         1.8      jmmv  *
    595        1.67     rmind  * Note that the . and .. components are not allowed as they do not
    596        1.67     rmind  * physically exist within directories.
    597         1.8      jmmv  */
    598        1.67     rmind tmpfs_dirent_t *
    599        1.67     rmind tmpfs_dir_lookup(tmpfs_node_t *node, struct componentname *cnp)
    600         1.1      jmmv {
    601        1.67     rmind 	const char *name = cnp->cn_nameptr;
    602        1.67     rmind 	const uint16_t nlen = cnp->cn_namelen;
    603        1.67     rmind 	tmpfs_dirent_t *de;
    604         1.1      jmmv 
    605        1.49      yamt 	KASSERT(VOP_ISLOCKED(node->tn_vnode));
    606        1.67     rmind 	KASSERT(nlen != 1 || !(name[0] == '.'));
    607        1.67     rmind 	KASSERT(nlen != 2 || !(name[0] == '.' && name[1] == '.'));
    608        1.71     rmind 	TMPFS_VALIDATE_DIR(node);
    609         1.1      jmmv 
    610        1.18      jmmv 	TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
    611        1.67     rmind 		if (de->td_namelen != nlen)
    612        1.67     rmind 			continue;
    613        1.69     rmind 		if (memcmp(de->td_name, name, nlen) != 0)
    614        1.67     rmind 			continue;
    615        1.67     rmind 		break;
    616         1.1      jmmv 	}
    617        1.49      yamt 	return de;
    618         1.1      jmmv }
    619         1.1      jmmv 
    620         1.9      jmmv /*
    621        1.71     rmind  * tmpfs_dir_cached: get a cached directory entry if it is valid.  Used to
    622        1.83     rmind  * avoid unnecessary tmpfs_dir_lookup().
    623        1.71     rmind  *
    624        1.71     rmind  * => The vnode must be locked.
    625        1.71     rmind  */
    626        1.71     rmind tmpfs_dirent_t *
    627        1.71     rmind tmpfs_dir_cached(tmpfs_node_t *node)
    628        1.71     rmind {
    629        1.71     rmind 	tmpfs_dirent_t *de = node->tn_dirent_hint;
    630        1.71     rmind 
    631        1.71     rmind 	KASSERT(VOP_ISLOCKED(node->tn_vnode));
    632        1.71     rmind 
    633        1.71     rmind 	if (de == NULL) {
    634        1.71     rmind 		return NULL;
    635        1.71     rmind 	}
    636        1.71     rmind 	KASSERT(de->td_node == node);
    637        1.71     rmind 
    638        1.71     rmind 	/*
    639        1.71     rmind 	 * Directories always have a valid hint.  For files, check if there
     640        1.71     rmind 	 * are any hard links.  If there are, the hint might be invalid.
    641        1.71     rmind 	 */
    642        1.71     rmind 	return (node->tn_type != VDIR && node->tn_links > 1) ? NULL : de;
    643        1.71     rmind }
    644        1.71     rmind 
    645        1.71     rmind /*
    646        1.83     rmind  * tmpfs_dir_getseq: get a per-directory sequence number for the entry.
    647        1.83     rmind  *
    648        1.83     rmind  * => Shall not be larger than 2^31 for linux32 compatibility.
    649         1.9      jmmv  */
    650        1.83     rmind uint32_t
    651        1.83     rmind tmpfs_dir_getseq(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
    652         1.1      jmmv {
    653        1.83     rmind 	uint32_t seq = de->td_seq;
    654        1.83     rmind 	vmem_t *seq_arena;
    655        1.83     rmind 	vmem_addr_t off;
    656        1.84  christos 	int error __diagused;
    657         1.1      jmmv 
    658        1.83     rmind 	TMPFS_VALIDATE_DIR(dnode);
    659        1.83     rmind 
    660        1.83     rmind 	if (__predict_true(seq != TMPFS_DIRSEQ_NONE)) {
    661        1.83     rmind 		/* Already set. */
    662        1.83     rmind 		KASSERT(seq >= TMPFS_DIRSEQ_START);
    663        1.83     rmind 		return seq;
    664        1.83     rmind 	}
    665        1.83     rmind 
    666        1.83     rmind 	/*
    667        1.83     rmind 	 * The "." and ".." and the end-of-directory have reserved numbers.
     668        1.83     rmind 	 * The other sequence numbers are allocated as follows:
     669        1.83     rmind 	 *
     670        1.83     rmind 	 * - The first half of the 2^31 range is assigned incrementally.
     671        1.83     rmind 	 *
     672        1.83     rmind 	 * - If that range is exceeded, then the second half of the 2^31
     673        1.83     rmind 	 * range is used, but managed by vmem(9).
    674        1.83     rmind 	 */
    675        1.83     rmind 
    676        1.83     rmind 	seq = dnode->tn_spec.tn_dir.tn_next_seq;
    677        1.83     rmind 	KASSERT(seq >= TMPFS_DIRSEQ_START);
    678        1.83     rmind 
    679        1.83     rmind 	if (__predict_true(seq < TMPFS_DIRSEQ_END)) {
    680        1.83     rmind 		/* First half: just increment and return. */
    681        1.83     rmind 		dnode->tn_spec.tn_dir.tn_next_seq++;
    682        1.83     rmind 		return seq;
    683        1.83     rmind 	}
    684        1.83     rmind 
    685        1.83     rmind 	/*
    686        1.83     rmind 	 * First half exceeded, use the second half.  May need to create
    687        1.83     rmind 	 * vmem(9) arena for the directory first.
    688        1.83     rmind 	 */
    689        1.83     rmind 	if ((seq_arena = dnode->tn_spec.tn_dir.tn_seq_arena) == NULL) {
    690        1.83     rmind 		seq_arena = vmem_create("tmpfscoo", 0,
    691        1.83     rmind 		    TMPFS_DIRSEQ_END - 1, 1, NULL, NULL, NULL, 0,
    692        1.83     rmind 		    VM_SLEEP, IPL_NONE);
    693        1.83     rmind 		dnode->tn_spec.tn_dir.tn_seq_arena = seq_arena;
    694        1.83     rmind 		KASSERT(seq_arena != NULL);
    695        1.83     rmind 	}
    696        1.83     rmind 	error = vmem_alloc(seq_arena, 1, VM_SLEEP | VM_BESTFIT, &off);
    697        1.83     rmind 	KASSERT(error == 0);
    698        1.83     rmind 
    699        1.83     rmind 	KASSERT(off < TMPFS_DIRSEQ_END);
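                               	/* Map the vmem offset into the second half of the sequence space. */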
    700        1.83     rmind 	seq = off | TMPFS_DIRSEQ_END;
    701        1.83     rmind 	return seq;
    702        1.83     rmind }
    703        1.83     rmind 
    704        1.83     rmind static void
    705        1.83     rmind tmpfs_dir_putseq(tmpfs_node_t *dnode, tmpfs_dirent_t *de)
    706        1.83     rmind {
    707        1.83     rmind 	vmem_t *seq_arena = dnode->tn_spec.tn_dir.tn_seq_arena;
    708        1.83     rmind 	uint32_t seq = de->td_seq;
    709        1.83     rmind 
    710        1.83     rmind 	TMPFS_VALIDATE_DIR(dnode);
    711        1.83     rmind 
    712        1.83     rmind 	if (seq == TMPFS_DIRSEQ_NONE || seq < TMPFS_DIRSEQ_END) {
    713        1.83     rmind 		/* First half (or no sequence number set yet). */
    714        1.83     rmind 		KASSERT(de->td_seq >= TMPFS_DIRSEQ_START);
    715        1.83     rmind 	} else {
    716        1.83     rmind 		/* Second half. */
    717        1.83     rmind 		KASSERT(seq_arena != NULL);
    718        1.83     rmind 		KASSERT(seq >= TMPFS_DIRSEQ_END);
    719        1.83     rmind 		seq &= ~TMPFS_DIRSEQ_END;
    720        1.83     rmind 		vmem_free(seq_arena, seq, 1);
    721        1.83     rmind 	}
    722        1.83     rmind 	de->td_seq = TMPFS_DIRSEQ_NONE;
    723         1.1      jmmv 
    724        1.83     rmind 	/* Empty?  We can reset. */
    725        1.83     rmind 	if (seq_arena && dnode->tn_size == 0) {
    726        1.83     rmind 		dnode->tn_spec.tn_dir.tn_seq_arena = NULL;
    727        1.83     rmind 		dnode->tn_spec.tn_dir.tn_next_seq = TMPFS_DIRSEQ_START;
    728        1.83     rmind 		vmem_destroy(seq_arena);
    729         1.1      jmmv 	}
    730         1.1      jmmv }
    731         1.1      jmmv 
    732         1.9      jmmv /*
    733        1.83     rmind  * tmpfs_dir_lookupbyseq: lookup a directory entry by the sequence number.
    734         1.9      jmmv  */
    735        1.83     rmind tmpfs_dirent_t *
    736        1.83     rmind tmpfs_dir_lookupbyseq(tmpfs_node_t *node, off_t seq)
    737         1.1      jmmv {
    738        1.83     rmind 	tmpfs_dirent_t *de = node->tn_spec.tn_dir.tn_readdir_lastp;
    739         1.1      jmmv 
    740         1.1      jmmv 	TMPFS_VALIDATE_DIR(node);
    741         1.1      jmmv 
    742        1.83     rmind 	/*
     743        1.83     rmind 	 * First, check the cache.  If it does not match, perform a lookup.
    744        1.83     rmind 	 */
    745        1.83     rmind 	if (de && de->td_seq == seq) {
    746        1.83     rmind 		KASSERT(de->td_seq >= TMPFS_DIRSEQ_START);
    747        1.83     rmind 		KASSERT(de->td_seq != TMPFS_DIRSEQ_NONE);
    748        1.83     rmind 		return de;
    749        1.83     rmind 	}
    750        1.83     rmind 	TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
    751        1.83     rmind 		KASSERT(de->td_seq >= TMPFS_DIRSEQ_START);
    752        1.83     rmind 		KASSERT(de->td_seq != TMPFS_DIRSEQ_NONE);
    753        1.83     rmind 		if (de->td_seq == seq)
    754        1.83     rmind 			return de;
    755         1.1      jmmv 	}
    756        1.83     rmind 	return NULL;
    757         1.1      jmmv }
    758         1.1      jmmv 
    759         1.9      jmmv /*
    760        1.83     rmind  * tmpfs_dir_getdotents: helper function for tmpfs_readdir() to get the
    761        1.83     rmind  * dot meta entries, that is, "." or "..".  Copy it to the UIO space.
    762         1.9      jmmv  */
    763        1.83     rmind static int
    764        1.83     rmind tmpfs_dir_getdotents(tmpfs_node_t *node, struct dirent *dp, struct uio *uio)
    765         1.5      yamt {
    766        1.67     rmind 	tmpfs_dirent_t *de;
    767        1.83     rmind 	off_t next = 0;
    768        1.83     rmind 	int error;
    769        1.83     rmind 
    770        1.83     rmind 	switch (uio->uio_offset) {
    771        1.83     rmind 	case TMPFS_DIRSEQ_DOT:
    772        1.89     rmind 		dp->d_fileno = node->tn_id;
    773        1.83     rmind 		strlcpy(dp->d_name, ".", sizeof(dp->d_name));
    774        1.83     rmind 		next = TMPFS_DIRSEQ_DOTDOT;
    775        1.83     rmind 		break;
    776        1.83     rmind 	case TMPFS_DIRSEQ_DOTDOT:
    777        1.89     rmind 		dp->d_fileno = node->tn_spec.tn_dir.tn_parent->tn_id;
    778        1.83     rmind 		strlcpy(dp->d_name, "..", sizeof(dp->d_name));
    779        1.83     rmind 		de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
    780        1.83     rmind 		next = de ? tmpfs_dir_getseq(node, de) : TMPFS_DIRSEQ_EOF;
    781        1.83     rmind 		break;
    782        1.83     rmind 	default:
    783        1.83     rmind 		KASSERT(false);
    784        1.83     rmind 	}
    785        1.89     rmind 	dp->d_type = DT_DIR;
    786        1.83     rmind 	dp->d_namlen = strlen(dp->d_name);
    787        1.83     rmind 	dp->d_reclen = _DIRENT_SIZE(dp);
    788        1.49      yamt 
    789        1.83     rmind 	if (dp->d_reclen > uio->uio_resid) {
    790        1.83     rmind 		return EJUSTRETURN;
    791         1.5      yamt 	}
    792        1.83     rmind 	if ((error = uiomove(dp, dp->d_reclen, uio)) != 0) {
    793        1.83     rmind 		return error;
    794         1.5      yamt 	}
    795        1.83     rmind 
    796        1.83     rmind 	uio->uio_offset = next;
    797        1.83     rmind 	return error;
    798         1.5      yamt }
    799         1.5      yamt 
    800         1.9      jmmv /*
    801        1.83     rmind  * tmpfs_dir_getdents: helper function for tmpfs_readdir.
    802        1.67     rmind  *
     803        1.67     rmind  * => Returns as many directory entries as can fit in the uio space.
    804        1.67     rmind  * => The read starts at uio->uio_offset.
    805         1.9      jmmv  */
    806         1.1      jmmv int
    807        1.67     rmind tmpfs_dir_getdents(tmpfs_node_t *node, struct uio *uio, off_t *cntp)
    808         1.1      jmmv {
    809        1.67     rmind 	tmpfs_dirent_t *de;
    810        1.94     pedro 	struct dirent dent;
    811        1.83     rmind 	int error = 0;
    812         1.1      jmmv 
    813        1.49      yamt 	KASSERT(VOP_ISLOCKED(node->tn_vnode));
    814         1.1      jmmv 	TMPFS_VALIDATE_DIR(node);
    815         1.1      jmmv 
    816        1.67     rmind 	/*
    817        1.94     pedro 	 * First check for the "." and ".." cases.
    818        1.83     rmind 	 * Note: tmpfs_dir_getdotents() will "seek" for us.
    819        1.67     rmind 	 */
    820        1.94     pedro 	memset(&dent, 0, sizeof(dent));
    821        1.83     rmind 
    822        1.83     rmind 	if (uio->uio_offset == TMPFS_DIRSEQ_DOT) {
    823        1.94     pedro 		if ((error = tmpfs_dir_getdotents(node, &dent, uio)) != 0) {
    824        1.83     rmind 			goto done;
    825        1.83     rmind 		}
    826        1.83     rmind 		(*cntp)++;
    827        1.83     rmind 	}
    828        1.83     rmind 	if (uio->uio_offset == TMPFS_DIRSEQ_DOTDOT) {
    829        1.94     pedro 		if ((error = tmpfs_dir_getdotents(node, &dent, uio)) != 0) {
    830        1.83     rmind 			goto done;
    831        1.83     rmind 		}
    832        1.83     rmind 		(*cntp)++;
    833        1.83     rmind 	}
    834        1.83     rmind 
    835        1.83     rmind 	/* Done if we reached the end. */
    836        1.83     rmind 	if (uio->uio_offset == TMPFS_DIRSEQ_EOF) {
    837        1.83     rmind 		goto done;
    838         1.5      yamt 	}
    839        1.83     rmind 
     840        1.83     rmind 	/* Locate the directory entry at the given sequence number. */
    841        1.83     rmind 	de = tmpfs_dir_lookupbyseq(node, uio->uio_offset);
    842         1.5      yamt 	if (de == NULL) {
    843        1.83     rmind 		error = EINVAL;
    844        1.83     rmind 		goto done;
    845         1.1      jmmv 	}
    846         1.1      jmmv 
    847        1.67     rmind 	/*
    848        1.83     rmind 	 * Read as many entries as possible; i.e., until we reach the end
    849        1.83     rmind 	 * of the directory or we exhaust UIO space.
    850        1.67     rmind 	 */
    851         1.1      jmmv 	do {
    852        1.62     pooka 		if (de->td_node == TMPFS_NODE_WHITEOUT) {
    853        1.94     pedro 			dent.d_fileno = 1;
    854        1.94     pedro 			dent.d_type = DT_WHT;
    855        1.62     pooka 		} else {
    856        1.94     pedro 			dent.d_fileno = de->td_node->tn_id;
    857        1.94     pedro 			dent.d_type = vtype2dt(de->td_node->tn_type);
    858         1.1      jmmv 		}
    859        1.94     pedro 		dent.d_namlen = de->td_namelen;
    860        1.94     pedro 		KASSERT(de->td_namelen < sizeof(dent.d_name));
    861        1.94     pedro 		memcpy(dent.d_name, de->td_name, de->td_namelen);
    862        1.94     pedro 		dent.d_name[de->td_namelen] = '\0';
    863        1.94     pedro 		dent.d_reclen = _DIRENT_SIZE(&dent);
    864         1.1      jmmv 
    865        1.94     pedro 		if (dent.d_reclen > uio->uio_resid) {
    866        1.83     rmind 			/* Exhausted UIO space. */
    867        1.83     rmind 			error = EJUSTRETURN;
    868         1.1      jmmv 			break;
    869         1.1      jmmv 		}
    870         1.1      jmmv 
    871        1.83     rmind 		/* Copy out the directory entry and continue. */
    872        1.94     pedro 		error = uiomove(&dent, dent.d_reclen, uio);
    873        1.83     rmind 		if (error) {
    874        1.83     rmind 			break;
    875        1.83     rmind 		}
    876         1.5      yamt 		(*cntp)++;
    877         1.1      jmmv 		de = TAILQ_NEXT(de, td_entries);
    878         1.1      jmmv 
    879        1.83     rmind 	} while (uio->uio_resid > 0 && de);
    880        1.83     rmind 
    881        1.83     rmind 	/* Cache the last entry or clear and mark EOF. */
    882        1.83     rmind 	uio->uio_offset = de ? tmpfs_dir_getseq(node, de) : TMPFS_DIRSEQ_EOF;
    883        1.83     rmind 	node->tn_spec.tn_dir.tn_readdir_lastp = de;
    884        1.83     rmind done:
    885        1.90     rmind 	tmpfs_update(node->tn_vnode, TMPFS_UPDATE_ATIME);
    886        1.83     rmind 
    887        1.83     rmind 	if (error == EJUSTRETURN) {
    888        1.83     rmind 		/* Exhausted UIO space - just return. */
    889        1.83     rmind 		error = 0;
    890        1.83     rmind 	}
    891        1.83     rmind 	KASSERT(error >= 0);
    892         1.1      jmmv 	return error;
    893         1.1      jmmv }
    894         1.1      jmmv 
    895         1.8      jmmv /*
    896        1.67     rmind  * tmpfs_reg_resize: resize the underlying UVM object associated with the
    897        1.67     rmind  * specified regular file.
    898         1.8      jmmv  */
    899         1.1      jmmv int
    900         1.1      jmmv tmpfs_reg_resize(struct vnode *vp, off_t newsize)
    901         1.1      jmmv {
    902        1.67     rmind 	tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);
    903        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
    904        1.74   hannken 	struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj;
    905        1.57     rmind 	size_t newpages, oldpages;
    906        1.13      yamt 	off_t oldsize;
    907         1.1      jmmv 
    908         1.1      jmmv 	KASSERT(vp->v_type == VREG);
    909         1.1      jmmv 	KASSERT(newsize >= 0);
    910         1.1      jmmv 
    911        1.13      yamt 	oldsize = node->tn_size;
    912        1.57     rmind 	oldpages = round_page(oldsize) >> PAGE_SHIFT;
    913        1.57     rmind 	newpages = round_page(newsize) >> PAGE_SHIFT;
    914        1.18      jmmv 	KASSERT(oldpages == node->tn_spec.tn_reg.tn_aobj_pages);
    915         1.1      jmmv 
    916        1.57     rmind 	if (newpages > oldpages) {
    917        1.57     rmind 		/* Increase the used-memory counter if getting extra pages. */
    918        1.57     rmind 		if (!tmpfs_mem_incr(tmp, (newpages - oldpages) << PAGE_SHIFT)) {
    919        1.57     rmind 			return ENOSPC;
    920        1.57     rmind 		}
    921        1.57     rmind 	} else if (newsize < oldsize) {
    922        1.91     rmind 		size_t zerolen;
    923        1.13      yamt 
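                               		/* Zero the part of the last page beyond the new end of file. */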
    924        1.91     rmind 		zerolen = MIN(round_page(newsize), node->tn_size) - newsize;
    925        1.74   hannken 		ubc_zerorange(uobj, newsize, zerolen, UBC_UNMAP_FLAG(vp));
    926        1.13      yamt 	}
    927         1.1      jmmv 
    928        1.36     pooka 	node->tn_spec.tn_reg.tn_aobj_pages = newpages;
    929        1.36     pooka 	node->tn_size = newsize;
    930        1.36     pooka 	uvm_vnp_setsize(vp, newsize);
    931        1.36     pooka 
    932        1.76     enami 	/*
    933        1.76     enami 	 * Free "backing store".
    934        1.76     enami 	 */
    935        1.43        ad 	if (newpages < oldpages) {
    936        1.76     enami 		KASSERT(uobj->vmobjlock == vp->v_interlock);
    937        1.76     enami 
    938        1.76     enami 		mutex_enter(uobj->vmobjlock);
    939        1.76     enami 		uao_dropswap_range(uobj, newpages, oldpages);
    940        1.76     enami 		mutex_exit(uobj->vmobjlock);
    941        1.76     enami 
    942        1.57     rmind 		/* Decrease the used-memory counter. */
    943        1.57     rmind 		tmpfs_mem_decr(tmp, (oldpages - newpages) << PAGE_SHIFT);
    944        1.43        ad 	}
    945        1.67     rmind 	if (newsize > oldsize) {
    946        1.29      jmmv 		VN_KNOTE(vp, NOTE_EXTEND);
    947        1.67     rmind 	}
    948        1.57     rmind 	return 0;
    949         1.1      jmmv }
    950         1.1      jmmv 
    951         1.9      jmmv /*
    952        1.67     rmind  * tmpfs_chflags: change flags of the given vnode.
    953         1.9      jmmv  */
    954         1.1      jmmv int
    955        1.67     rmind tmpfs_chflags(vnode_t *vp, int flags, kauth_cred_t cred, lwp_t *l)
    956         1.1      jmmv {
    957        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
    958        1.55     pooka 	kauth_action_t action = KAUTH_VNODE_WRITE_FLAGS;
    959        1.79      elad 	int error;
    960        1.79      elad 	bool changing_sysflags = false;
    961         1.1      jmmv 
    962         1.1      jmmv 	KASSERT(VOP_ISLOCKED(vp));
    963         1.1      jmmv 
    964         1.1      jmmv 	/* Disallow this operation if the file system is mounted read-only. */
    965         1.1      jmmv 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
    966         1.1      jmmv 		return EROFS;
    967         1.1      jmmv 
    968        1.54      elad 	/*
    969        1.54      elad 	 * If the new flags have non-user flags that are different than
    970        1.54      elad 	 * those on the node, we need special permission to change them.
    971        1.54      elad 	 */
    972        1.54      elad 	if ((flags & SF_SETTABLE) != (node->tn_flags & SF_SETTABLE)) {
    973        1.54      elad 		action |= KAUTH_VNODE_WRITE_SYSFLAGS;
    974        1.79      elad 		changing_sysflags = true;
    975        1.54      elad 	}
    976        1.54      elad 
    977        1.54      elad 	/*
    978        1.54      elad 	 * Indicate that this node's flags have system attributes in them if
    979        1.54      elad 	 * that's the case.
    980        1.54      elad 	 */
    981        1.54      elad 	if (node->tn_flags & (SF_IMMUTABLE | SF_APPEND)) {
    982        1.54      elad 		action |= KAUTH_VNODE_HAS_SYSFLAGS;
    983        1.54      elad 	}
    984        1.54      elad 
    985        1.79      elad 	error = kauth_authorize_vnode(cred, action, vp, NULL,
    986        1.79      elad 	    genfs_can_chflags(cred, vp->v_type, node->tn_uid,
    987        1.79      elad 	    changing_sysflags));
    988        1.54      elad 	if (error)
    989         1.1      jmmv 		return error;
    990        1.54      elad 
    991        1.54      elad 	/*
    992        1.54      elad 	 * Set the flags. If we're not setting non-user flags, be careful not
    993        1.54      elad 	 * to overwrite them.
    994        1.54      elad 	 *
    995        1.54      elad 	 * XXX: Can't we always assign here? if the system flags are different,
    996        1.54      elad 	 *      the code above should catch attempts to change them without
    997        1.54      elad 	 *      proper permissions, and if we're here it means it's okay to
    998        1.54      elad 	 *      change them...
    999        1.54      elad 	 */
   1000        1.79      elad 	if (!changing_sysflags) {
   1001        1.54      elad 		/* Clear all user-settable flags and re-set them. */
   1002         1.1      jmmv 		node->tn_flags &= SF_SETTABLE;
   1003         1.1      jmmv 		node->tn_flags |= (flags & UF_SETTABLE);
   1004        1.67     rmind 	} else {
   1005        1.67     rmind 		node->tn_flags = flags;
   1006         1.1      jmmv 	}
   1007        1.90     rmind 	tmpfs_update(vp, TMPFS_UPDATE_CTIME);
   1008         1.1      jmmv 	VN_KNOTE(vp, NOTE_ATTRIB);
   1009         1.1      jmmv 	return 0;
   1010         1.1      jmmv }
   1011         1.1      jmmv 
   1012         1.9      jmmv /*
   1013        1.67     rmind  * tmpfs_chmod: change access mode on the given vnode.
   1014         1.9      jmmv  */
   1015         1.1      jmmv int
   1016        1.67     rmind tmpfs_chmod(vnode_t *vp, mode_t mode, kauth_cred_t cred, lwp_t *l)
   1017         1.1      jmmv {
   1018        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
   1019        1.51      elad 	int error;
   1020         1.1      jmmv 
   1021         1.1      jmmv 	KASSERT(VOP_ISLOCKED(vp));
   1022         1.1      jmmv 
   1023         1.1      jmmv 	/* Disallow this operation if the file system is mounted read-only. */
   1024         1.1      jmmv 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
   1025         1.1      jmmv 		return EROFS;
   1026         1.1      jmmv 
   1027         1.1      jmmv 	/* Immutable or append-only files cannot be modified, either. */
   1028         1.1      jmmv 	if (node->tn_flags & (IMMUTABLE | APPEND))
   1029         1.1      jmmv 		return EPERM;
   1030         1.1      jmmv 
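                               	/* Authorize the mode change. */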
   1031        1.54      elad 	error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_SECURITY, vp,
    1032        1.79      elad 	    NULL, genfs_can_chmod(vp->v_type, cred, node->tn_uid,
                               	    node->tn_gid, mode));
   1033        1.67     rmind 	if (error) {
   1034        1.67     rmind 		return error;
   1035        1.67     rmind 	}
   1036         1.1      jmmv 	node->tn_mode = (mode & ALLPERMS);
   1037        1.90     rmind 	tmpfs_update(vp, TMPFS_UPDATE_CTIME);
   1038         1.1      jmmv 	VN_KNOTE(vp, NOTE_ATTRIB);
   1039         1.1      jmmv 	return 0;
   1040         1.1      jmmv }
   1041         1.1      jmmv 
   1042         1.9      jmmv /*
   1043        1.67     rmind  * tmpfs_chown: change ownership of the given vnode.
   1044        1.67     rmind  *
    1045        1.67     rmind  * => At least one of uid or gid must be different from VNOVAL.
   1046        1.67     rmind  * => Attribute is unchanged for VNOVAL case.
   1047         1.9      jmmv  */
   1048         1.1      jmmv int
   1049        1.67     rmind tmpfs_chown(vnode_t *vp, uid_t uid, gid_t gid, kauth_cred_t cred, lwp_t *l)
   1050         1.1      jmmv {
   1051        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
   1052        1.51      elad 	int error;
   1053         1.1      jmmv 
   1054         1.1      jmmv 	KASSERT(VOP_ISLOCKED(vp));
   1055         1.1      jmmv 
   1056         1.1      jmmv 	/* Assign default values if they are unknown. */
   1057         1.1      jmmv 	KASSERT(uid != VNOVAL || gid != VNOVAL);
   1058        1.67     rmind 	if (uid == VNOVAL) {
   1059         1.1      jmmv 		uid = node->tn_uid;
   1060        1.67     rmind 	}
   1061        1.67     rmind 	if (gid == VNOVAL) {
   1062         1.1      jmmv 		gid = node->tn_gid;
   1063        1.67     rmind 	}
   1064         1.1      jmmv 
   1065         1.1      jmmv 	/* Disallow this operation if the file system is mounted read-only. */
   1066         1.1      jmmv 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
   1067         1.1      jmmv 		return EROFS;
   1068         1.1      jmmv 
   1069         1.1      jmmv 	/* Immutable or append-only files cannot be modified, either. */
   1070         1.1      jmmv 	if (node->tn_flags & (IMMUTABLE | APPEND))
   1071         1.1      jmmv 		return EPERM;
   1072         1.1      jmmv 
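                               	/* Authorize the ownership change. */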
   1073        1.54      elad 	error = kauth_authorize_vnode(cred, KAUTH_VNODE_CHANGE_OWNERSHIP, vp,
   1074        1.79      elad 	    NULL, genfs_can_chown(cred, node->tn_uid, node->tn_gid, uid,
   1075        1.67     rmind 	    gid));
   1076        1.67     rmind 	if (error) {
   1077        1.67     rmind 		return error;
   1078        1.67     rmind 	}
   1079         1.1      jmmv 	node->tn_uid = uid;
   1080         1.1      jmmv 	node->tn_gid = gid;
   1081        1.90     rmind 	tmpfs_update(vp, TMPFS_UPDATE_CTIME);
   1082         1.1      jmmv 	VN_KNOTE(vp, NOTE_ATTRIB);
   1083         1.1      jmmv 	return 0;
   1084         1.1      jmmv }
   1085         1.1      jmmv 
   1086         1.9      jmmv /*
   1087        1.67     rmind  * tmpfs_chsize: change size of the given vnode.
   1088         1.9      jmmv  */
   1089         1.1      jmmv int
   1090        1.67     rmind tmpfs_chsize(vnode_t *vp, u_quad_t size, kauth_cred_t cred, lwp_t *l)
   1091         1.1      jmmv {
   1092        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
   1093        1.90     rmind 	const off_t length = size;
   1094        1.90     rmind 	int error;
   1095         1.1      jmmv 
   1096         1.1      jmmv 	KASSERT(VOP_ISLOCKED(vp));
   1097         1.1      jmmv 
   1098         1.1      jmmv 	/* Decide whether this is a valid operation based on the file type. */
   1099         1.1      jmmv 	switch (vp->v_type) {
   1100         1.1      jmmv 	case VDIR:
   1101         1.1      jmmv 		return EISDIR;
   1102         1.1      jmmv 	case VREG:
   1103        1.67     rmind 		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
   1104         1.1      jmmv 			return EROFS;
   1105        1.67     rmind 		}
   1106         1.1      jmmv 		break;
   1107         1.1      jmmv 	case VBLK:
   1108         1.1      jmmv 	case VCHR:
   1109         1.1      jmmv 	case VFIFO:
   1110        1.67     rmind 		/*
    1111        1.67     rmind 		 * Allow modifications of special files even if the file
   1112         1.1      jmmv 		 * system is mounted read-only (we are not modifying the
   1113        1.67     rmind 		 * files themselves, but the objects they represent).
   1114        1.67     rmind 		 */
   1115        1.14      yamt 		return 0;
   1116         1.1      jmmv 	default:
   1117        1.14      yamt 		return EOPNOTSUPP;
   1118         1.1      jmmv 	}
   1119         1.1      jmmv 
   1120         1.1      jmmv 	/* Immutable or append-only files cannot be modified, either. */
   1121        1.67     rmind 	if (node->tn_flags & (IMMUTABLE | APPEND)) {
   1122         1.1      jmmv 		return EPERM;
   1123        1.67     rmind 	}
   1124         1.1      jmmv 
   1125        1.90     rmind 	if (length < 0) {
   1126        1.90     rmind 		return EINVAL;
   1127        1.90     rmind 	}
   1128        1.90     rmind 
   1129        1.90     rmind 	/* Note: tmpfs_reg_resize() will raise NOTE_EXTEND and NOTE_ATTRIB. */
   1130       1.102   hannken 	if (node->tn_size != length &&
   1131       1.102   hannken 	    (error = tmpfs_reg_resize(vp, length)) != 0) {
   1132        1.90     rmind 		return error;
   1133        1.90     rmind 	}
   1134        1.90     rmind 	tmpfs_update(vp, TMPFS_UPDATE_CTIME | TMPFS_UPDATE_MTIME);
   1135        1.90     rmind 	return 0;
   1136         1.1      jmmv }
   1137         1.1      jmmv 
   1138         1.9      jmmv /*
    1139        1.67     rmind  * tmpfs_chtimes: change access, modification and birth times of the vnode.
   1140         1.9      jmmv  */
   1141         1.1      jmmv int
   1142        1.67     rmind tmpfs_chtimes(vnode_t *vp, const struct timespec *atime,
   1143        1.48  christos     const struct timespec *mtime, const struct timespec *btime,
   1144        1.67     rmind     int vaflags, kauth_cred_t cred, lwp_t *l)
   1145         1.1      jmmv {
   1146        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
   1147         1.1      jmmv 	int error;
   1148         1.1      jmmv 
   1149         1.1      jmmv 	KASSERT(VOP_ISLOCKED(vp));
   1150         1.1      jmmv 
   1151         1.1      jmmv 	/* Disallow this operation if the file system is mounted read-only. */
   1152         1.1      jmmv 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
   1153         1.1      jmmv 		return EROFS;
   1154         1.1      jmmv 
   1155         1.1      jmmv 	/* Immutable or append-only files cannot be modified, either. */
   1156         1.1      jmmv 	if (node->tn_flags & (IMMUTABLE | APPEND))
   1157         1.1      jmmv 		return EPERM;
   1158         1.1      jmmv 
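                               	/* Authorize the time change. */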
   1159        1.54      elad 	error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES, vp, NULL,
   1160        1.67     rmind 	    genfs_can_chtimes(vp, vaflags, node->tn_uid, cred));
   1161        1.53      elad 	if (error)
   1162        1.67     rmind 		return error;
   1163         1.1      jmmv 
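                               	/* A timestamp of VNOVAL means "leave this value unchanged". */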
   1164        1.90     rmind 	if (atime->tv_sec != VNOVAL) {
   1165        1.90     rmind 		node->tn_atime = *atime;
   1166        1.90     rmind 	}
   1167        1.90     rmind 	if (mtime->tv_sec != VNOVAL) {
   1168        1.90     rmind 		node->tn_mtime = *mtime;
   1169        1.90     rmind 	}
   1170        1.90     rmind 	if (btime->tv_sec != VNOVAL) {
   1171        1.90     rmind 		node->tn_birthtime = *btime;
   1172        1.90     rmind 	}
   1173        1.29      jmmv 	VN_KNOTE(vp, NOTE_ATTRIB);
   1174        1.12      yamt 	return 0;
   1175         1.1      jmmv }
   1176        1.10      yamt 
   1177        1.67     rmind /*
   1178        1.90     rmind  * tmpfs_update: update the timestamps as indicated by the flags.
   1179        1.67     rmind  */
   1180        1.10      yamt void
   1181        1.90     rmind tmpfs_update(vnode_t *vp, unsigned tflags)
   1182        1.10      yamt {
   1183        1.67     rmind 	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
   1184        1.56     rmind 	struct timespec nowtm;
   1185        1.10      yamt 
   1186        1.90     rmind 	if (tflags == 0) {
   1187        1.10      yamt 		return;
   1188        1.67     rmind 	}
   1189        1.56     rmind 	vfs_timestamp(&nowtm);
   1190        1.48  christos 
   1191        1.90     rmind 	if (tflags & TMPFS_UPDATE_ATIME) {
   1192        1.90     rmind 		node->tn_atime = nowtm;
   1193        1.10      yamt 	}
   1194        1.90     rmind 	if (tflags & TMPFS_UPDATE_MTIME) {
   1195        1.90     rmind 		node->tn_mtime = nowtm;
   1196        1.10      yamt 	}
   1197        1.90     rmind 	if (tflags & TMPFS_UPDATE_CTIME) {
   1198        1.56     rmind 		node->tn_ctime = nowtm;
   1199        1.48  christos 	}
   1200        1.12      yamt }
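
                               /*
                                * Illustrative usage (cf. the attribute helpers above): a data-changing
                                * operation such as tmpfs_chsize() refreshes both timestamps with
                                *
                                *	tmpfs_update(vp, TMPFS_UPDATE_CTIME | TMPFS_UPDATE_MTIME);
                                *
                                * whereas pure metadata changes (chflags/chmod/chown) pass only
                                * TMPFS_UPDATE_CTIME.
                                */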
   1201