/*	$NetBSD: tmpfs_vfsops.c,v 1.76 2020/01/17 20:08:08 ad Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.76 2020/01/17 20:08:08 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kauth.h>
#include <sys/module.h>

#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs.h>
#include <fs/tmpfs/tmpfs_args.h>

MODULE(MODULE_CLASS_VFS, tmpfs, NULL);

struct pool	tmpfs_dirent_pool;
struct pool	tmpfs_node_pool;

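/*
 * Memory pools backing the two main tmpfs structures: directory
 * entries and inodes (nodes).  Set up at initialization time and
 * torn down when the module is unloaded.
 */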
void
tmpfs_init(void)
{

	pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0,
	    "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE);
	pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0,
	    "tmpfs_node", &pool_allocator_nointr, IPL_NONE);
}

void
tmpfs_done(void)
{

	pool_destroy(&tmpfs_dirent_pool);
	pool_destroy(&tmpfs_node_pool);
}

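/*
 * tmpfs_mount: handle a new mount, an update of an existing mount
 * (MNT_UPDATE) or a query for the current arguments (MNT_GETARGS).
 *
 * For illustration only, a typical invocation from userland (via
 * mount_tmpfs(8)) might look like this; the size passed with -s is
 * just an example value:
 *
 *	mount -t tmpfs -o -s32m tmpfs /tmp
 */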
int
tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct tmpfs_args *args = data;
	tmpfs_mount_t *tmp;
	tmpfs_node_t *root;
	struct vattr va;
	struct vnode *vp;
	uint64_t memlimit;
	ino_t nodes;
	int error, flags;
	bool set_memlimit;
	bool set_nodes;

	if (args == NULL)
		return EINVAL;

	/* Validate the version. */
	if (*data_len < sizeof(*args) ||
	    args->ta_version != TMPFS_ARGS_VERSION)
		return EINVAL;

	/* Handle retrieval of mount point arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (mp->mnt_data == NULL)
			return EIO;
		tmp = VFS_TO_TMPFS(mp);

		args->ta_version = TMPFS_ARGS_VERSION;
		args->ta_nodes_max = tmp->tm_nodes_max;
		args->ta_size_max = tmp->tm_mem_limit;

		root = tmp->tm_root;
		args->ta_root_uid = root->tn_uid;
		args->ta_root_gid = root->tn_gid;
		args->ta_root_mode = root->tn_mode;

		*data_len = sizeof(*args);
		return 0;
	}

	/* Prohibit mounts if there is not enough memory. */
	if (tmpfs_mem_info(true) < uvmexp.freetarg)
		return EINVAL;

	/* Check for invalid uid and gid arguments. */
	if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL)
		return EINVAL;

	/* Get the memory usage limit for this file-system. */
	if (args->ta_size_max < PAGE_SIZE) {
		memlimit = UINT64_MAX;
		set_memlimit = false;
	} else {
		memlimit = args->ta_size_max;
		set_memlimit = true;
	}
	KASSERT(memlimit > 0);

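	/*
	 * Get the node limit for this file-system.  If the request is
	 * absent or too small to be usable, derive a default from the
	 * memory limit: roughly one node per kilobyte.
	 */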
	if (args->ta_nodes_max <= 3) {
		nodes = 3 + (memlimit / 1024);
		set_nodes = false;
	} else {
		nodes = args->ta_nodes_max;
		set_nodes = true;
	}
	nodes = MIN(nodes, INT_MAX);
	KASSERT(nodes >= 3);

	if (mp->mnt_flag & MNT_UPDATE) {
		tmp = VFS_TO_TMPFS(mp);
		if (set_nodes && nodes < tmp->tm_nodes_cnt)
			return EBUSY;
		if ((mp->mnt_iflag & IMNT_WANTRDONLY)) {
			/* Changing from read/write to read-only. */
			flags = WRITECLOSE;
			if ((mp->mnt_flag & MNT_FORCE))
				flags |= FORCECLOSE;
			error = vflush(mp, NULL, flags);
			if (error)
				return error;
		}
		if (set_memlimit) {
			if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0)
				return error;
		}
		if (set_nodes)
			tmp->tm_nodes_max = nodes;
		root = tmp->tm_root;
		root->tn_uid = args->ta_root_uid;
		root->tn_gid = args->ta_root_gid;
		root->tn_mode = args->ta_root_mode;
		return 0;
	}

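	/* Set up the generic mount information. */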
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
	vfs_getnewfsid(mp);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_cnt = 0;
	LIST_INIT(&tmp->tm_nodes);

	mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);
	tmpfs_mntmem_init(tmp, memlimit);
	mp->mnt_data = tmp;

	/* Allocate the root node. */
	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = args->ta_root_mode & ALLPERMS;
	va.va_uid = args->ta_root_uid;
	va.va_gid = args->ta_root_gid;
	error = vcache_new(mp, NULL, &va, NOCRED, NULL, &vp);
	if (error) {
		mp->mnt_data = NULL;
		tmpfs_mntmem_destroy(tmp);
		mutex_destroy(&tmp->tm_lock);
		kmem_free(tmp, sizeof(*tmp));
		return error;
	}
	KASSERT(vp != NULL);
	root = VP_TO_TMPFS_NODE(vp);
	KASSERT(root != NULL);

	/*
	 * The parent of the root inode is itself.  Also, the root inode
	 * has no directory entry (i.e. it is never attached), so hold an
	 * extra link reference for it.
	 */
	root->tn_links++;
	root->tn_spec.tn_dir.tn_parent = root;
	tmp->tm_root = root;
	vrele(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error) {
		(void)tmpfs_unmount(mp, MNT_FORCE);
	}
	return error;
}

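/*
 * tmpfs_start: VFS start operation.  Nothing to do here: a tmpfs
 * instance is fully operational as soon as it has been mounted.
 */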
int
tmpfs_start(struct mount *mp, int flags)
{

	return 0;
}

int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node, *cnode;
	int error, flags = 0;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, NULL, flags);
	if (error != 0)
		return error;

	/*
	 * First round, detach and destroy all directory entries.
	 * Also, clear the pointers to the vnodes - they are gone.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		tmpfs_dirent_t *de;

		node->tn_vnode = NULL;
		if (node->tn_type != VDIR) {
			continue;
		}
		while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
			cnode = de->td_node;
			if (cnode && cnode != TMPFS_NODE_WHITEOUT) {
				cnode->tn_vnode = NULL;
			}
			tmpfs_dir_detach(node, de);
			tmpfs_free_dirent(tmp, de);
		}
		/* Extra virtual entry (itself for the root). */
		node->tn_links--;
	}

	/* Release the reference on root (diagnostic). */
	node = tmp->tm_root;
	node->tn_links--;

	/* Second round, destroy all inodes. */
	while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) {
		tmpfs_free_node(tmp, node);
	}

	/* Throw away the tmpfs_mount structure. */
	tmpfs_mntmem_destroy(tmp);
	mutex_destroy(&tmp->tm_lock);
	kmem_free(tmp, sizeof(*tmp));
	mp->mnt_data = NULL;

	return 0;
}

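/*
 * tmpfs_root: look up the root vnode through the vnode cache and
 * return it locked with the requested lock type.
 */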
int
tmpfs_root(struct mount *mp, int lktype, vnode_t **vpp)
{
	tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root;
	int error;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, lktype);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}

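/*
 * tmpfs_vget: lookup by inode number is not supported; file-handle
 * based lookup goes through tmpfs_fhtovp() instead.
 */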
int
tmpfs_vget(struct mount *mp, ino_t ino, int lktype, vnode_t **vpp)
{

	return EOPNOTSUPP;
}

int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int lktype, vnode_t **vpp)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node;
	tmpfs_fid_t tfh;
	int error;

	if (fhp->fid_len != sizeof(tmpfs_fid_t)) {
		return EINVAL;
	}
	memcpy(&tfh, fhp, sizeof(tmpfs_fid_t));

	mutex_enter(&tmp->tm_lock);
	/* XXX: linear scan of all nodes; a better data structure is needed. */
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		if (node->tn_id == tfh.tf_id) {
			/* Prevent this node from disappearing. */
			atomic_inc_32(&node->tn_holdcount);
			break;
		}
	}
	mutex_exit(&tmp->tm_lock);
	if (node == NULL)
		return ESTALE;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	/* If this node has been reclaimed, free it now. */
	if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) {
		KASSERT(error != 0);
		tmpfs_free_node(tmp, node);
	}
	if (error)
		return (error == ENOENT ? ESTALE : error);
	error = vn_lock(*vpp, lktype);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
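	/* The inode number may have been recycled; verify the generation. */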
	if (TMPFS_NODE_GEN(node) != tfh.tf_gen) {
		vput(*vpp);
		*vpp = NULL;
		return ESTALE;
	}

	return 0;
}

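/*
 * tmpfs_vptofh: build a file handle (inode number plus generation)
 * for the given vnode, used e.g. when the file system is exported
 * over NFS.
 */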
int
tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size)
{
	tmpfs_fid_t tfh;
	tmpfs_node_t *node;

	if (*fh_size < sizeof(tmpfs_fid_t)) {
		*fh_size = sizeof(tmpfs_fid_t);
		return E2BIG;
	}
	*fh_size = sizeof(tmpfs_fid_t);
	node = VP_TO_TMPFS_NODE(vp);

	memset(&tfh, 0, sizeof(tfh));
	tfh.tf_len = sizeof(tmpfs_fid_t);
	tfh.tf_gen = TMPFS_NODE_GEN(node);
	tfh.tf_id = node->tn_id;
	memcpy(fhp, &tfh, sizeof(tfh));

	return 0;
}

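/*
 * tmpfs_statvfs: report file system statistics.  Block counts are in
 * PAGE_SIZE units; the count of free nodes is additionally capped by
 * the memory still available for node structures.
 */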
int
tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	tmpfs_mount_t *tmp;
	fsfilcnt_t freenodes;
	size_t avail;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;

	mutex_enter(&tmp->tm_acc_lock);
	avail = tmpfs_pages_avail(tmp);
	sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT);
	sbp->f_bavail = sbp->f_bfree = avail;
	sbp->f_bresvd = 0;

	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
	    avail * PAGE_SIZE / sizeof(tmpfs_node_t));

	sbp->f_files = tmp->tm_nodes_cnt + freenodes;
	sbp->f_favail = sbp->f_ffree = freenodes;
	sbp->f_fresvd = 0;
	mutex_exit(&tmp->tm_acc_lock);

	copy_statvfs_info(sbp, mp);

	return 0;
}

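/*
 * tmpfs_sync: nothing to do.  All data lives in memory, so there is
 * never anything to flush to backing store.
 */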
int
tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc)
{

	return 0;
}

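/* tmpfs_snapshot: snapshots of memory file systems are not supported. */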
int
tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime)
{

	return EOPNOTSUPP;
}

/*
 * tmpfs vfs operations.
 */

extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
	&tmpfs_fifoop_opv_desc,
	&tmpfs_specop_opv_desc,
	&tmpfs_vnodeop_opv_desc,
	NULL,
};

struct vfsops tmpfs_vfsops = {
	.vfs_name = MOUNT_TMPFS,
	.vfs_min_mount_data = sizeof (struct tmpfs_args),
	.vfs_mount = tmpfs_mount,
	.vfs_start = tmpfs_start,
	.vfs_unmount = tmpfs_unmount,
	.vfs_root = tmpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = tmpfs_statvfs,
	.vfs_sync = tmpfs_sync,
	.vfs_vget = tmpfs_vget,
	.vfs_loadvnode = tmpfs_loadvnode,
	.vfs_newvnode = tmpfs_newvnode,
	.vfs_fhtovp = tmpfs_fhtovp,
	.vfs_vptofh = tmpfs_vptofh,
	.vfs_init = tmpfs_init,
	.vfs_done = tmpfs_done,
	.vfs_snapshot = tmpfs_snapshot,
	.vfs_extattrctl = vfs_stdextattrctl,
	.vfs_suspendctl = genfs_suspendctl,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = tmpfs_vnodeopv_descs
};

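/*
 * tmpfs_modcmd: module control entry point; attach or detach the
 * tmpfs vfsops on module load and unload.
 */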
static int
tmpfs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&tmpfs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&tmpfs_vfsops);
	default:
		return ENOTTY;
	}
}