/*	$NetBSD: tmpfs_vfsops.c,v 1.65 2015/07/06 10:07:12 hannken Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.65 2015/07/06 10:07:12 hannken Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kauth.h>
#include <sys/module.h>

#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs.h>
#include <fs/tmpfs/tmpfs_args.h>

MODULE(MODULE_CLASS_VFS, tmpfs, NULL);

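/*
 * Memory pools for directory entries and inodes, shared by all
 * tmpfs mounts.
 */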
struct pool	tmpfs_dirent_pool;
struct pool	tmpfs_node_pool;

void
tmpfs_init(void)
{

	pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0,
	    "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE);
	pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0,
	    "tmpfs_node", &pool_allocator_nointr, IPL_NONE);
}

void
tmpfs_done(void)
{

	pool_destroy(&tmpfs_dirent_pool);
	pool_destroy(&tmpfs_node_pool);
}

int
tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct tmpfs_args *args = data;
	tmpfs_mount_t *tmp;
	tmpfs_node_t *root;
	struct vattr va;
	struct vnode *vp;
	uint64_t memlimit;
	ino_t nodes;
	int error;

	if (args == NULL)
		return EINVAL;

	/* Validate the version. */
	if (*data_len < sizeof(*args) ||
	    args->ta_version != TMPFS_ARGS_VERSION)
		return EINVAL;

	/* Handle retrieval of mount point arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (mp->mnt_data == NULL)
			return EIO;
		tmp = VFS_TO_TMPFS(mp);

		args->ta_version = TMPFS_ARGS_VERSION;
		args->ta_nodes_max = tmp->tm_nodes_max;
		args->ta_size_max = tmp->tm_mem_limit;

		root = tmp->tm_root;
		args->ta_root_uid = root->tn_uid;
		args->ta_root_gid = root->tn_gid;
		args->ta_root_mode = root->tn_mode;

		*data_len = sizeof(*args);
		return 0;
	}

	/* Prohibit mounts if there is not enough memory. */
	if (tmpfs_mem_info(true) < uvmexp.freetarg)
		return EINVAL;

	/* Check for invalid uid and gid arguments. */
	if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL)
		return EINVAL;

	/* The mode is masked with ALLPERMS, so this check can never trigger. */
	if ((args->ta_root_mode & ALLPERMS) == VNOVAL)
		return EINVAL;

	/* Get the memory usage limit for this file-system. */
	if (args->ta_size_max < PAGE_SIZE) {
		memlimit = UINT64_MAX;
	} else {
		memlimit = args->ta_size_max;
	}
	KASSERT(memlimit > 0);

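	/*
	 * Derive the inode limit: if none was given, allow roughly one
	 * inode per kilobyte of the memory limit, clamped to INT_MAX.
	 */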
	if (args->ta_nodes_max <= 3) {
		nodes = 3 + (memlimit / 1024);
	} else {
		nodes = args->ta_nodes_max;
	}
	nodes = MIN(nodes, INT_MAX);
	KASSERT(nodes >= 3);

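	/*
	 * For MNT_UPDATE, adjust the limits and the root inode's
	 * ownership and mode on the existing mount.
	 */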
	if (mp->mnt_flag & MNT_UPDATE) {
		tmp = VFS_TO_TMPFS(mp);
		if (nodes < tmp->tm_nodes_cnt)
			return EBUSY;
		if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0)
			return error;
		tmp->tm_nodes_max = nodes;
		root = tmp->tm_root;
		root->tn_uid = args->ta_root_uid;
		root->tn_gid = args->ta_root_gid;
		root->tn_mode = args->ta_root_mode;
		return 0;
	}

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP);
	if (tmp == NULL)
		return ENOMEM;

	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_cnt = 0;
	LIST_INIT(&tmp->tm_nodes);

	mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);
	tmpfs_mntmem_init(tmp, memlimit);
	mp->mnt_data = tmp;

	/* Allocate the root node. */
	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = args->ta_root_mode & ALLPERMS;
	va.va_uid = args->ta_root_uid;
	va.va_gid = args->ta_root_gid;
	error = vcache_new(mp, NULL, &va, NOCRED, &vp);
	root = VP_TO_TMPFS_NODE(vp);
	KASSERT(error == 0 && root != NULL);

	/*
	 * The parent of the root inode is itself.  Also, the root inode
	 * has no directory entry (i.e. it is never attached), so hold an
	 * extra reference (link) for it.
	 */
	root->tn_links++;
	root->tn_spec.tn_dir.tn_parent = root;
	tmp->tm_root = root;
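	/*
	 * Drop the vnode reference from vcache_new(); the root inode
	 * stays on the mount's node list until unmount.
	 */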
	vrele(vp);

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE;
	vfs_getnewfsid(mp);

	error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error) {
		(void)tmpfs_unmount(mp, MNT_FORCE);
	}
	return error;
}

int
tmpfs_start(struct mount *mp, int flags)
{

	return 0;
}

int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node, *cnode;
	int error, flags = 0;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, NULL, flags);
	if (error != 0)
		return error;

	/*
	 * First round, detach and destroy all directory entries.
	 * Also, clear the pointers to the vnodes - they are gone.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		tmpfs_dirent_t *de;

		node->tn_vnode = NULL;
		if (node->tn_type != VDIR) {
			continue;
		}
		while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
			cnode = de->td_node;
			if (cnode && cnode != TMPFS_NODE_WHITEOUT) {
				cnode->tn_vnode = NULL;
			}
			tmpfs_dir_detach(node, de);
			tmpfs_free_dirent(tmp, de);
		}
		/* Extra virtual entry (itself for the root). */
		node->tn_links--;
	}

	/* Release the reference on root (diagnostic). */
	node = tmp->tm_root;
	node->tn_links--;

	/* Second round, destroy all inodes. */
	while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) {
		tmpfs_free_node(tmp, node);
	}

	/* Throw away the tmpfs_mount structure. */
	tmpfs_mntmem_destroy(tmp);
	mutex_destroy(&tmp->tm_lock);
	kmem_free(tmp, sizeof(*tmp));
	mp->mnt_data = NULL;

	return 0;
}

int
tmpfs_root(struct mount *mp, vnode_t **vpp)
{
	tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root;
	int error;

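	/* Look up the root vnode; the cache key is the inode pointer. */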
	error = vcache_get(mp, &node, sizeof(node), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}

int
tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp)
{

	return EOPNOTSUPP;
}

int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, vnode_t **vpp)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node;
	tmpfs_fid_t tfh;
	int error;

	if (fhp->fid_len != sizeof(tmpfs_fid_t)) {
		return EINVAL;
	}
	memcpy(&tfh, fhp, sizeof(tmpfs_fid_t));

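	/* Search the inode list for the identifier in the file handle. */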
	mutex_enter(&tmp->tm_lock);
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		if (node->tn_id == tfh.tf_id) {
			/* Prevent this node from disappearing. */
			atomic_inc_32(&node->tn_holdcount);
			break;
		}
	}
	mutex_exit(&tmp->tm_lock);
	if (node == NULL)
		return ESTALE;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	/* If this node has been reclaimed free it now. */
	if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) {
		KASSERT(error != 0);
		tmpfs_free_node(tmp, node);
	}
	if (error)
		return (error == ENOENT ? ESTALE : error);
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	if (TMPFS_NODE_GEN(node) != tfh.tf_gen) {
		vput(*vpp);
		*vpp = NULL;
		return ESTALE;
	}

	return 0;
}

int
tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size)
{
	tmpfs_fid_t tfh;
	tmpfs_node_t *node;

	if (*fh_size < sizeof(tmpfs_fid_t)) {
		*fh_size = sizeof(tmpfs_fid_t);
		return E2BIG;
	}
	*fh_size = sizeof(tmpfs_fid_t);
	node = VP_TO_TMPFS_NODE(vp);

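	/* Build the handle from the inode's generation number and identifier. */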
	memset(&tfh, 0, sizeof(tfh));
	tfh.tf_len = sizeof(tmpfs_fid_t);
	tfh.tf_gen = TMPFS_NODE_GEN(node);
	tfh.tf_id = node->tn_id;
	memcpy(fhp, &tfh, sizeof(tfh));

	return 0;
}

int
tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	tmpfs_mount_t *tmp;
	fsfilcnt_t freenodes;
	size_t avail;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;

	mutex_enter(&tmp->tm_acc_lock);
	avail = tmpfs_pages_avail(tmp);
	sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT);
	sbp->f_bavail = sbp->f_bfree = avail;
	sbp->f_bresvd = 0;

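	/*
	 * The number of free inodes is bounded both by the configured
	 * node limit and by the memory still available.
	 */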
	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
	    avail * PAGE_SIZE / sizeof(tmpfs_node_t));

	sbp->f_files = tmp->tm_nodes_cnt + freenodes;
	sbp->f_favail = sbp->f_ffree = freenodes;
	sbp->f_fresvd = 0;
	mutex_exit(&tmp->tm_acc_lock);

	copy_statvfs_info(sbp, mp);

	return 0;
}

int
tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc)
{

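	/* All tmpfs data lives in memory; there is nothing to write back. */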
	return 0;
}

int
tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime)
{

	return EOPNOTSUPP;
}

/*
 * tmpfs vfs operations.
 */

extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
	&tmpfs_fifoop_opv_desc,
	&tmpfs_specop_opv_desc,
	&tmpfs_vnodeop_opv_desc,
	NULL,
};

struct vfsops tmpfs_vfsops = {
	.vfs_name = MOUNT_TMPFS,
	.vfs_min_mount_data = sizeof (struct tmpfs_args),
	.vfs_mount = tmpfs_mount,
	.vfs_start = tmpfs_start,
	.vfs_unmount = tmpfs_unmount,
	.vfs_root = tmpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = tmpfs_statvfs,
	.vfs_sync = tmpfs_sync,
	.vfs_vget = tmpfs_vget,
	.vfs_loadvnode = tmpfs_loadvnode,
	.vfs_newvnode = tmpfs_newvnode,
	.vfs_fhtovp = tmpfs_fhtovp,
	.vfs_vptofh = tmpfs_vptofh,
	.vfs_init = tmpfs_init,
	.vfs_done = tmpfs_done,
	.vfs_snapshot = tmpfs_snapshot,
	.vfs_extattrctl = vfs_stdextattrctl,
	.vfs_suspendctl = (void *)eopnotsupp,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = tmpfs_vnodeopv_descs
};

static int
tmpfs_modcmd(modcmd_t cmd, void *arg)
{

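	/* Attach/detach the tmpfs vfsops on module load/unload. */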
	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&tmpfs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&tmpfs_vfsops);
	default:
		return ENOTTY;
	}
}