tmpfs_subr.c revision 1.5 1 1.5 yamt /* $NetBSD: tmpfs_subr.c,v 1.5 2005/09/15 12:34:35 yamt Exp $ */
2 1.1 jmmv
3 1.1 jmmv /*
4 1.1 jmmv * Copyright (c) 2005 The NetBSD Foundation, Inc.
5 1.1 jmmv * All rights reserved.
6 1.1 jmmv *
7 1.1 jmmv * This code is derived from software contributed to The NetBSD Foundation
8 1.1 jmmv * by Julio M. Merino Vidal.
9 1.1 jmmv *
10 1.1 jmmv * Redistribution and use in source and binary forms, with or without
11 1.1 jmmv * modification, are permitted provided that the following conditions
12 1.1 jmmv * are met:
13 1.1 jmmv * 1. Redistributions of source code must retain the above copyright
14 1.1 jmmv * notice, this list of conditions and the following disclaimer.
15 1.1 jmmv * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 jmmv * notice, this list of conditions and the following disclaimer in the
17 1.1 jmmv * documentation and/or other materials provided with the distribution.
18 1.1 jmmv * 3. All advertising materials mentioning features or use of this software
19 1.1 jmmv * must display the following acknowledgement:
20 1.1 jmmv * This product includes software developed by the NetBSD
21 1.1 jmmv * Foundation, Inc. and its contributors.
22 1.1 jmmv * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 jmmv * contributors may be used to endorse or promote products derived
24 1.1 jmmv * from this software without specific prior written permission.
25 1.1 jmmv *
26 1.1 jmmv * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 jmmv * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 jmmv * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 jmmv * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 jmmv * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 jmmv * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 jmmv * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 jmmv * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 jmmv * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 jmmv * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 jmmv * POSSIBILITY OF SUCH DAMAGE.
37 1.1 jmmv */
38 1.1 jmmv
39 1.1 jmmv /*
40 1.2 jmmv * Efficient memory file system supporting functions.
41 1.1 jmmv */
42 1.1 jmmv
43 1.1 jmmv #include <sys/cdefs.h>
44 1.5 yamt __KERNEL_RCSID(0, "$NetBSD: tmpfs_subr.c,v 1.5 2005/09/15 12:34:35 yamt Exp $");
45 1.1 jmmv
46 1.1 jmmv #include <sys/param.h>
47 1.1 jmmv #include <sys/dirent.h>
48 1.1 jmmv #include <sys/event.h>
49 1.1 jmmv #include <sys/malloc.h>
50 1.1 jmmv #include <sys/mount.h>
51 1.1 jmmv #include <sys/namei.h>
52 1.1 jmmv #include <sys/time.h>
53 1.1 jmmv #include <sys/stat.h>
54 1.1 jmmv #include <sys/systm.h>
55 1.1 jmmv #include <sys/swap.h>
56 1.1 jmmv #include <sys/vnode.h>
57 1.1 jmmv
58 1.1 jmmv #include <uvm/uvm.h>
59 1.1 jmmv
60 1.1 jmmv #include <miscfs/specfs/specdev.h>
61 1.1 jmmv #include <fs/tmpfs/tmpfs.h>
62 1.1 jmmv #include <fs/tmpfs/tmpfs_fifoops.h>
63 1.1 jmmv #include <fs/tmpfs/tmpfs_specops.h>
64 1.1 jmmv #include <fs/tmpfs/tmpfs_vnops.h>
65 1.1 jmmv
66 1.1 jmmv /* --------------------------------------------------------------------- */
67 1.1 jmmv
/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode'.
 * Directories get 'parent' as their parent (NULL only when allocating the
 * file system root); symbolic links take their contents from 'target';
 * block/character devices take their device number from 'rdev'.
 *
 * On success, *node holds the new node and zero is returned; otherwise an
 * errno value (ENOSPC) is returned and no node is allocated.
 */
int
tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
    uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
    char *target, dev_t rdev, struct proc *p, struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;

	/* If the root directory of the 'tmp' file system is not yet
	 * allocated, this must be the request to do it. */
	KASSERT(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));

	/* Type-dependent argument sanity: symlinks carry a target string,
	 * devices carry a device number. */
	KASSERT(IFF(type == VLNK, target != NULL));
	KASSERT(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));

	KASSERT(uid != VNOVAL && gid != VNOVAL && mode != VNOVAL);

	/* Obtain a node: either recycle one from the 'avail' list (keeping
	 * its id and bumping its generation) or carve a brand new one from
	 * the node pool, subject to the tm_nodes_max limit. */
	nnode = NULL;
	if (LIST_EMPTY(&tmp->tm_nodes_avail)) {
		KASSERT(tmp->tm_nodes_last <= tmp->tm_nodes_max);
		if (tmp->tm_nodes_last == tmp->tm_nodes_max)
			return ENOSPC;

		nnode =
		    (struct tmpfs_node *)TMPFS_POOL_GET(&tmp->tm_node_pool, 0);
		if (nnode == NULL)
			return ENOSPC;
		nnode->tn_id = tmp->tm_nodes_last++;
		nnode->tn_gen = 0;
	} else {
		nnode = LIST_FIRST(&tmp->tm_nodes_avail);
		LIST_REMOVE(nnode, tn_entries);
		nnode->tn_gen++;
	}
	KASSERT(nnode != NULL);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);

	/* Generic initialization. */
	nnode->tn_type = type;
	nnode->tn_size = 0;
	nnode->tn_status = 0;
	nnode->tn_flags = 0;
	nnode->tn_links = 0;
	(void)nanotime(&nnode->tn_atime);
	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
	    nnode->tn_atime;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_vnode = NULL;

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		/* A directory references itself when it is the root, and
		 * holds one link for '.' plus one on its parent for '..'. */
		TAILQ_INIT(&nnode->tn_dir);
		nnode->tn_parent = (parent == NULL) ? nnode : parent;
		nnode->tn_readdir_lastn = 0;
		nnode->tn_readdir_lastp = NULL;
		nnode->tn_links++;
		nnode->tn_parent->tn_links++;
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		KASSERT(strlen(target) < MAXPATHLEN);
		nnode->tn_link = tmpfs_str_pool_get(&tmp->tm_str_pool,
		    strlen(target), 0);
		if (nnode->tn_link == NULL) {
			/* Mark the node VNON so tmpfs_free_node has nothing
			 * type-specific to undo. */
			nnode->tn_type = VNON;
			tmpfs_free_node(tmp, nnode);
			return ENOSPC;
		}
		/* NOTE(review): the pool buffer was requested with size
		 * strlen(target), but strcpy writes strlen(target) + 1
		 * bytes (it copies the NUL terminator too).  Confirm that
		 * tmpfs_str_pool_get rounds allocations up enough to absorb
		 * the extra byte, or switch to a memcpy of tn_size bytes. */
		strcpy(nnode->tn_link, target);
		nnode->tn_size = strlen(target);
		break;

	case VREG:
		/* Backing store is an anonymous UVM object; pages are
		 * accounted separately in tn_aobj_pages. */
		nnode->tn_aobj = uao_create(INT32_MAX - PAGE_SIZE, 0);
		nnode->tn_aobj_pages = 0;
		break;

	default:
		KASSERT(0);
	}

	*node = nnode;
	return 0;
}
164 1.1 jmmv
165 1.1 jmmv /* --------------------------------------------------------------------- */
166 1.1 jmmv
167 1.1 jmmv void
168 1.1 jmmv tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
169 1.1 jmmv {
170 1.1 jmmv ino_t id;
171 1.1 jmmv unsigned long gen;
172 1.1 jmmv size_t pages;
173 1.1 jmmv
174 1.1 jmmv switch (node->tn_type) {
175 1.1 jmmv case VNON:
176 1.1 jmmv /* Do not do anything. VNON is provided to let the
177 1.1 jmmv * allocation routine clean itself easily by avoiding
178 1.1 jmmv * duplicating code in it. */
179 1.1 jmmv /* FALLTHROUGH */
180 1.1 jmmv case VBLK:
181 1.1 jmmv /* FALLTHROUGH */
182 1.1 jmmv case VCHR:
183 1.1 jmmv /* FALLTHROUGH */
184 1.1 jmmv case VDIR:
185 1.1 jmmv /* FALLTHROUGH */
186 1.1 jmmv case VFIFO:
187 1.1 jmmv /* FALLTHROUGH */
188 1.1 jmmv case VSOCK:
189 1.1 jmmv pages = 0;
190 1.1 jmmv break;
191 1.1 jmmv
192 1.1 jmmv case VLNK:
193 1.1 jmmv tmpfs_str_pool_put(&tmp->tm_str_pool, node->tn_link,
194 1.1 jmmv strlen(node->tn_link));
195 1.1 jmmv pages = 0;
196 1.1 jmmv break;
197 1.1 jmmv
198 1.1 jmmv case VREG:
199 1.1 jmmv if (node->tn_aobj != NULL)
200 1.1 jmmv uao_detach(node->tn_aobj);
201 1.1 jmmv pages = node->tn_aobj_pages;
202 1.1 jmmv break;
203 1.1 jmmv
204 1.1 jmmv default:
205 1.1 jmmv KASSERT(0);
206 1.1 jmmv pages = 0; /* Shut up gcc when !DIAGNOSTIC. */
207 1.1 jmmv break;
208 1.1 jmmv }
209 1.1 jmmv
210 1.1 jmmv tmp->tm_pages_used -= pages;
211 1.1 jmmv
212 1.1 jmmv LIST_REMOVE(node, tn_entries);
213 1.1 jmmv id = node->tn_id;
214 1.1 jmmv gen = node->tn_gen;
215 1.1 jmmv memset(node, 0, sizeof(struct tmpfs_node));
216 1.1 jmmv node->tn_id = id;
217 1.1 jmmv node->tn_type = VNON;
218 1.1 jmmv node->tn_gen = gen;
219 1.1 jmmv LIST_INSERT_HEAD(&tmp->tm_nodes_avail, node, tn_entries);
220 1.1 jmmv }
221 1.1 jmmv
222 1.1 jmmv /* --------------------------------------------------------------------- */
223 1.1 jmmv
224 1.1 jmmv int
225 1.1 jmmv tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
226 1.1 jmmv const char *name, uint16_t len, struct tmpfs_dirent **de)
227 1.1 jmmv {
228 1.1 jmmv struct tmpfs_dirent *nde;
229 1.1 jmmv
230 1.1 jmmv nde = (struct tmpfs_dirent *)TMPFS_POOL_GET(&tmp->tm_dirent_pool, 0);
231 1.1 jmmv if (nde == NULL)
232 1.1 jmmv return ENOSPC;
233 1.1 jmmv
234 1.1 jmmv nde->td_name = tmpfs_str_pool_get(&tmp->tm_str_pool, len, 0);
235 1.1 jmmv if (nde->td_name == NULL) {
236 1.1 jmmv TMPFS_POOL_PUT(&tmp->tm_dirent_pool, nde);
237 1.1 jmmv return ENOSPC;
238 1.1 jmmv }
239 1.1 jmmv nde->td_namelen = len;
240 1.1 jmmv memcpy(nde->td_name, name, len);
241 1.1 jmmv nde->td_node = node;
242 1.1 jmmv
243 1.1 jmmv node->tn_links++;
244 1.1 jmmv *de = nde;
245 1.1 jmmv
246 1.1 jmmv return 0;
247 1.1 jmmv }
248 1.1 jmmv
249 1.1 jmmv /* --------------------------------------------------------------------- */
250 1.1 jmmv
251 1.1 jmmv void
252 1.1 jmmv tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de,
253 1.1 jmmv boolean_t node_exists)
254 1.1 jmmv {
255 1.1 jmmv if (node_exists) {
256 1.1 jmmv struct tmpfs_node *node;
257 1.1 jmmv
258 1.1 jmmv node = de->td_node;
259 1.1 jmmv
260 1.1 jmmv KASSERT(node->tn_links > 0);
261 1.1 jmmv node->tn_links--;
262 1.1 jmmv }
263 1.1 jmmv
264 1.1 jmmv tmpfs_str_pool_put(&tmp->tm_str_pool, de->td_name, de->td_namelen);
265 1.1 jmmv TMPFS_POOL_PUT(&tmp->tm_dirent_pool, de);
266 1.1 jmmv }
267 1.1 jmmv
268 1.1 jmmv /* --------------------------------------------------------------------- */
269 1.1 jmmv
/*
 * Obtains a locked vnode for the node 'node' on mount 'mp'.  If the node
 * already has a vnode attached, it is reused (via vget); otherwise a new
 * one is allocated and initialized according to the node's type.  On
 * success *vpp holds the locked vnode and zero is returned; on failure
 * *vpp is NULL and an errno value is returned.  In all cases
 * node->tn_vnode is updated to match *vpp.
 */
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, struct vnode **vpp)
{
	int error;
	struct vnode *nvp;
	struct vnode *vp;

	vp = NULL;

	/* Reuse the cached vnode if the node already has one. */
	if (node->tn_vnode != NULL) {
		vp = node->tn_vnode;
		vget(vp, LK_EXCLUSIVE | LK_RETRY);
		error = 0;
		goto out;
	}

	/* Get a new vnode and associate it with our node. */
	error = getnewvnode(VT_TMPFS, mp, tmpfs_vnodeop_p, &vp);
	if (error != 0)
		goto out;
	KASSERT(vp != NULL);

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error != 0) {
		vp->v_data = NULL;
		ungetnewvnode(vp);
		vp = NULL;
		goto out;
	}

	vp->v_data = node;
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		vp->v_op = tmpfs_specop_p;
		/* checkalias returns non-NULL when another vnode already
		 * exists for this device; in that case switch over to the
		 * alias and dispose of the vnode we just created. */
		nvp = checkalias(vp, node->tn_rdev, mp);
		if (nvp != NULL) {
			/* Discard unneeded vnode, but save its inode. */
			nvp->v_data = vp->v_data;
			vp->v_data = NULL;

			/* XXX spec_vnodeops has no locking, so we have to
			 * do it explicitly. */
			VOP_UNLOCK(vp, 0);
			vp->v_op = spec_vnodeop_p;
			vp->v_flag &= ~VLOCKSWORK;
			vrele(vp);
			vgone(vp);

			/* Reinitialize aliased node. */
			vp = nvp;
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0) {
				vp->v_data = NULL;
				vp = NULL;
				goto out;
			}
		}
		break;

	case VDIR:
		/* NOTE(review): this is a plain assignment, so any flag
		 * bits previously set on the fresh vnode are cleared here,
		 * not just VROOT toggled — confirm that is intended. */
		vp->v_flag = node->tn_parent == node ? VROOT : 0;
		break;

	case VFIFO:
		vp->v_op = tmpfs_fifoop_p;
		break;

	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	default:
		KASSERT(0);
	}

	uvm_vnp_setsize(vp, node->tn_size);

	error = 0;

out:
	/* Publish the result; on the failure paths vp is NULL here, which
	 * also clears the node's vnode cache. */
	*vpp = node->tn_vnode = vp;

	KASSERT(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp)));
	KASSERT(*vpp == node->tn_vnode);

	return error;
}
365 1.1 jmmv
366 1.1 jmmv /* --------------------------------------------------------------------- */
367 1.1 jmmv
368 1.1 jmmv void
369 1.1 jmmv tmpfs_free_vp(struct vnode *vp)
370 1.1 jmmv {
371 1.1 jmmv struct tmpfs_node *node;
372 1.1 jmmv
373 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
374 1.1 jmmv
375 1.1 jmmv node->tn_vnode = NULL;
376 1.1 jmmv vp->v_data = NULL;
377 1.1 jmmv }
378 1.1 jmmv
379 1.1 jmmv /* --------------------------------------------------------------------- */
380 1.1 jmmv
/* Allocates a new file of type 'type' and adds it to the parent directory
 * 'dvp'; this addition is done using the component name given in 'cnp'.
 * The ownership of the new file is automatically assigned based on the
 * credentials of the caller (through 'cnp'), the group is set based on
 * the parent directory and the mode is determined from the 'vap' argument.
 * If successful, *vpp holds a locked vnode to the newly created file and
 * zero is returned.  Otherwise *vpp is NULL and the function returns an
 * appropriate error code.  In either case dvp is unlocked and released
 * (vput) before returning, and the pathname buffer is freed unless the
 * caller asked for SAVESTART on success. */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
    struct componentname *cnp, char *target)
{
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	struct tmpfs_node *parent;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(cnp->cn_flags & HASBUF);

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* If the entry we are creating is a directory, we cannot overflow
	 * the number of links of its parent, because it will get a new
	 * link. */
	if (vap->va_type == VDIR) {
		/* Ensure that we do not overflow the maximum number of links
		 * imposed by the system. */
		KASSERT(dnode->tn_links <= LINK_MAX);
		if (dnode->tn_links == LINK_MAX) {
			error = EMLINK;
			goto out;
		}

		parent = dnode;
	} else
		parent = NULL;

	/* Allocate a node that represents the new file. */
	error = tmpfs_alloc_node(tmp, vap->va_type, cnp->cn_cred->cr_uid,
	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev,
	    cnp->cn_proc, &node);
	if (error != 0)
		goto out;

	/* Allocate a directory entry that points to the new file. */
	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
	    &de);
	if (error != 0) {
		tmpfs_free_node(tmp, node);
		goto out;
	}

	/* Allocate a vnode for the new file. */
	error = tmpfs_alloc_vp(dvp->v_mount, node, vpp);
	if (error != 0) {
		tmpfs_free_dirent(tmp, de, TRUE);
		tmpfs_free_node(tmp, node);
		goto out;
	}

	/* Now that all required items are allocated, we can proceed to
	 * insert the new node into the directory, an operation that
	 * cannot fail. */
	tmpfs_dir_attach(dvp, de);
	VN_KNOTE(dvp, NOTE_WRITE);

out:
	/* Free the pathname buffer unless the lookup asked us to keep it
	 * (SAVESTART) and the operation succeeded. */
	if (error != 0 || !(cnp->cn_flags & SAVESTART))
		PNBUF_PUT(cnp->cn_pnbuf);
	/* NOTE(review): the KASSERT below inspects dvp after vput has
	 * dropped our reference and lock; under DIAGNOSTIC this relies on
	 * dvp not being recycled meanwhile — confirm this is safe here. */
	vput(dvp);

	KASSERT(!VOP_ISLOCKED(dvp));
	KASSERT(IFF(error == 0, *vpp != NULL));

	return error;
}
462 1.1 jmmv
463 1.1 jmmv /* --------------------------------------------------------------------- */
464 1.1 jmmv
465 1.1 jmmv void
466 1.1 jmmv tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
467 1.1 jmmv {
468 1.1 jmmv struct tmpfs_node *dnode;
469 1.1 jmmv
470 1.1 jmmv dnode = VP_TO_TMPFS_DIR(vp);
471 1.1 jmmv
472 1.1 jmmv TAILQ_INSERT_TAIL(&dnode->tn_dir, de, td_entries);
473 1.1 jmmv dnode->tn_size += sizeof(struct tmpfs_dirent);
474 1.1 jmmv dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
475 1.1 jmmv TMPFS_NODE_MODIFIED;
476 1.1 jmmv uvm_vnp_setsize(vp, dnode->tn_size);
477 1.1 jmmv }
478 1.1 jmmv
479 1.1 jmmv /* --------------------------------------------------------------------- */
480 1.1 jmmv
481 1.1 jmmv void
482 1.1 jmmv tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
483 1.1 jmmv {
484 1.1 jmmv struct tmpfs_node *dnode;
485 1.1 jmmv
486 1.5 yamt KASSERT(VOP_ISLOCKED(vp));
487 1.5 yamt
488 1.1 jmmv dnode = VP_TO_TMPFS_DIR(vp);
489 1.1 jmmv
490 1.5 yamt if (dnode->tn_readdir_lastp == de) {
491 1.5 yamt dnode->tn_readdir_lastn = 0;
492 1.5 yamt dnode->tn_readdir_lastp = NULL;
493 1.5 yamt }
494 1.5 yamt
495 1.1 jmmv TAILQ_REMOVE(&dnode->tn_dir, de, td_entries);
496 1.1 jmmv dnode->tn_size -= sizeof(struct tmpfs_dirent);
497 1.1 jmmv dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
498 1.1 jmmv TMPFS_NODE_MODIFIED;
499 1.1 jmmv uvm_vnp_setsize(vp, dnode->tn_size);
500 1.1 jmmv }
501 1.1 jmmv
502 1.1 jmmv /* --------------------------------------------------------------------- */
503 1.1 jmmv
504 1.1 jmmv struct tmpfs_dirent *
505 1.1 jmmv tmpfs_dir_lookup(struct tmpfs_node *node, struct componentname *cnp)
506 1.1 jmmv {
507 1.1 jmmv boolean_t found;
508 1.1 jmmv struct tmpfs_dirent *de;
509 1.1 jmmv
510 1.1 jmmv KASSERT(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
511 1.1 jmmv KASSERT(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
512 1.1 jmmv cnp->cn_nameptr[1] == '.')));
513 1.1 jmmv TMPFS_VALIDATE_DIR(node);
514 1.1 jmmv
515 1.1 jmmv node->tn_status |= TMPFS_NODE_ACCESSED;
516 1.1 jmmv
517 1.1 jmmv found = 0;
518 1.1 jmmv TAILQ_FOREACH(de, &node->tn_dir, td_entries) {
519 1.1 jmmv KASSERT(cnp->cn_namelen < 0xffff);
520 1.1 jmmv if (de->td_namelen == (uint16_t)cnp->cn_namelen &&
521 1.1 jmmv memcmp(de->td_name, cnp->cn_nameptr, de->td_namelen) == 0) {
522 1.1 jmmv found = 1;
523 1.1 jmmv break;
524 1.1 jmmv }
525 1.1 jmmv }
526 1.1 jmmv
527 1.1 jmmv return found ? de : NULL;
528 1.1 jmmv }
529 1.1 jmmv
530 1.1 jmmv /* --------------------------------------------------------------------- */
531 1.1 jmmv
532 1.1 jmmv /* Helper function for tmpfs_readdir. Creates a '.' entry for the given
533 1.1 jmmv * directory and returns it in the uio space. The function returns 0
534 1.1 jmmv * on success, -1 if there was not enough space in the uio structure to
535 1.1 jmmv * hold the directory entry or an appropriate error code if another
536 1.1 jmmv * error happens. */
537 1.1 jmmv int
538 1.1 jmmv tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
539 1.1 jmmv {
540 1.1 jmmv int error;
541 1.1 jmmv struct dirent dent;
542 1.1 jmmv
543 1.1 jmmv TMPFS_VALIDATE_DIR(node);
544 1.5 yamt KASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
545 1.1 jmmv
546 1.1 jmmv dent.d_fileno = node->tn_id;
547 1.1 jmmv dent.d_type = DT_DIR;
548 1.1 jmmv dent.d_namlen = 1;
549 1.1 jmmv dent.d_name[0] = '.';
550 1.1 jmmv dent.d_name[1] = '\0';
551 1.1 jmmv dent.d_reclen = _DIRENT_SIZE(&dent);
552 1.1 jmmv
553 1.1 jmmv if (dent.d_reclen > uio->uio_resid)
554 1.1 jmmv error = -1;
555 1.1 jmmv else {
556 1.1 jmmv error = uiomove(&dent, dent.d_reclen, uio);
557 1.1 jmmv if (error == 0)
558 1.5 yamt uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
559 1.1 jmmv }
560 1.1 jmmv
561 1.1 jmmv node->tn_status |= TMPFS_NODE_ACCESSED;
562 1.1 jmmv
563 1.1 jmmv return error;
564 1.1 jmmv }
565 1.1 jmmv
566 1.1 jmmv /* --------------------------------------------------------------------- */
567 1.1 jmmv
568 1.1 jmmv /* Helper function for tmpfs_readdir. Creates a '..' entry for the given
569 1.1 jmmv * directory and returns it in the uio space. The function returns 0
570 1.1 jmmv * on success, -1 if there was not enough space in the uio structure to
571 1.1 jmmv * hold the directory entry or an appropriate error code if another
572 1.1 jmmv * error happens. */
573 1.1 jmmv int
574 1.1 jmmv tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
575 1.1 jmmv {
576 1.1 jmmv int error;
577 1.1 jmmv struct dirent dent;
578 1.1 jmmv
579 1.1 jmmv TMPFS_VALIDATE_DIR(node);
580 1.5 yamt KASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
581 1.1 jmmv
582 1.1 jmmv dent.d_fileno = node->tn_id;
583 1.1 jmmv dent.d_type = DT_DIR;
584 1.1 jmmv dent.d_namlen = 2;
585 1.1 jmmv dent.d_name[0] = '.';
586 1.1 jmmv dent.d_name[1] = '.';
587 1.1 jmmv dent.d_name[2] = '\0';
588 1.1 jmmv dent.d_reclen = _DIRENT_SIZE(&dent);
589 1.1 jmmv
590 1.1 jmmv if (dent.d_reclen > uio->uio_resid)
591 1.1 jmmv error = -1;
592 1.1 jmmv else {
593 1.1 jmmv error = uiomove(&dent, dent.d_reclen, uio);
594 1.5 yamt if (error == 0) {
595 1.5 yamt struct tmpfs_dirent *de;
596 1.5 yamt
597 1.5 yamt de = TAILQ_FIRST(&node->tn_dir);
598 1.5 yamt if (de == NULL)
599 1.5 yamt uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
600 1.5 yamt else
601 1.5 yamt uio->uio_offset = TMPFS_DIRCOOKIE(de);
602 1.5 yamt }
603 1.1 jmmv }
604 1.1 jmmv
605 1.1 jmmv node->tn_status |= TMPFS_NODE_ACCESSED;
606 1.1 jmmv
607 1.1 jmmv return error;
608 1.1 jmmv }
609 1.1 jmmv
610 1.1 jmmv /* --------------------------------------------------------------------- */
611 1.1 jmmv
612 1.5 yamt /* lookup a directory entry by cookie */
613 1.5 yamt struct tmpfs_dirent *
614 1.5 yamt tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
615 1.5 yamt {
616 1.5 yamt struct tmpfs_dirent *de;
617 1.5 yamt
618 1.5 yamt if (cookie == node->tn_readdir_lastn &&
619 1.5 yamt node->tn_readdir_lastp != NULL) {
620 1.5 yamt return node->tn_readdir_lastp;
621 1.5 yamt }
622 1.5 yamt
623 1.5 yamt TAILQ_FOREACH(de, &node->tn_dir, td_entries) {
624 1.5 yamt if (TMPFS_DIRCOOKIE(de) == cookie) {
625 1.5 yamt break;
626 1.5 yamt }
627 1.5 yamt }
628 1.5 yamt
629 1.5 yamt return de;
630 1.5 yamt }
631 1.5 yamt
632 1.5 yamt /* --------------------------------------------------------------------- */
633 1.5 yamt
/* Helper function for tmpfs_readdir.  Returns as much directory entries
 * as can fit in the uio space, starting at the cookie stored in
 * uio->uio_offset (which must be past the synthetic '.' and '..').
 * The function returns 0 on success, -1 if there was not enough space
 * in the uio structure to hold the directory entry, EINVAL for a stale
 * cookie, or an appropriate error code if another error happens.  *cntp
 * is incremented once per entry processed; on return, uio->uio_offset
 * and the node's readdir cache point at the next entry (or EOF). */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
	int error;
	off_t startcookie;
	struct tmpfs_dirent *de;

	TMPFS_VALIDATE_DIR(node);

	/* Locate the first directory entry we have to return.  We have cached
	 * the last readdir in the node, so use those values if appropriate.
	 * Otherwise do a linear scan to find the requested entry. */
	startcookie = uio->uio_offset;
	KASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
	KASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
	if (startcookie == TMPFS_DIRCOOKIE_EOF) {
		return 0;
	} else {
		de = tmpfs_dir_lookupbycookie(node, startcookie);
	}
	if (de == NULL) {
		/* The cookie no longer names a live entry. */
		return EINVAL;
	}

	/* Read as much entries as possible; i.e., until we reach the end of
	 * the directory or we exhaust uio space. */
	do {
		struct dirent d;

		/* Create a dirent structure representing the current
		 * tmpfs_node and fill it. */
		d.d_fileno = de->td_node->tn_id;
		switch (de->td_node->tn_type) {
		case VBLK:
			d.d_type = DT_BLK;
			break;

		case VCHR:
			d.d_type = DT_CHR;
			break;

		case VDIR:
			d.d_type = DT_DIR;
			break;

		case VFIFO:
			d.d_type = DT_FIFO;
			break;

		case VLNK:
			d.d_type = DT_LNK;
			break;

		case VREG:
			d.d_type = DT_REG;
			break;

		case VSOCK:
			d.d_type = DT_SOCK;
			break;

		default:
			KASSERT(0);
		}
		d.d_namlen = de->td_namelen;
		KASSERT(de->td_namelen < sizeof(d.d_name));
		(void)memcpy(d.d_name, de->td_name, de->td_namelen);
		d.d_name[de->td_namelen] = '\0';
		d.d_reclen = _DIRENT_SIZE(&d);

		/* Stop reading if the directory entry we are treating is
		 * bigger than the amount of data that can be returned. */
		if (d.d_reclen > uio->uio_resid) {
			error = -1;
			break;
		}

		/* Copy the new dirent structure into the output buffer and
		 * advance pointers. */
		error = uiomove(&d, d.d_reclen, uio);

		/* NOTE(review): *cntp is incremented even when the uiomove
		 * above failed; confirm callers expect the count to include
		 * the entry whose copyout errored. */
		(*cntp)++;
		de = TAILQ_NEXT(de, td_entries);
	} while (error == 0 && uio->uio_resid > 0 && de != NULL);

	/* Update the offset and cache. */
	if (de == NULL) {
		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
		node->tn_readdir_lastn = 0;
		node->tn_readdir_lastp = NULL;
	} else {
		node->tn_readdir_lastn = uio->uio_offset = TMPFS_DIRCOOKIE(de);
		node->tn_readdir_lastp = de;
	}

	node->tn_status |= TMPFS_NODE_ACCESSED;

	return error;
}
738 1.1 jmmv
739 1.1 jmmv /* --------------------------------------------------------------------- */
740 1.1 jmmv
741 1.1 jmmv int
742 1.1 jmmv tmpfs_reg_resize(struct vnode *vp, off_t newsize)
743 1.1 jmmv {
744 1.1 jmmv int error;
745 1.1 jmmv size_t newpages, oldpages;
746 1.1 jmmv struct tmpfs_mount *tmp;
747 1.1 jmmv struct tmpfs_node *node;
748 1.1 jmmv
749 1.1 jmmv KASSERT(vp->v_type == VREG);
750 1.1 jmmv KASSERT(newsize >= 0);
751 1.1 jmmv
752 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
753 1.1 jmmv tmp = VFS_TO_TMPFS(vp->v_mount);
754 1.1 jmmv
755 1.1 jmmv /* Convert the old and new sizes to the number of pages needed to
756 1.1 jmmv * store them. It may happen that we do not need to do anything
757 1.1 jmmv * because the last allocated page can accommodate the change on
758 1.1 jmmv * its own. */
759 1.1 jmmv oldpages = round_page(node->tn_size) / PAGE_SIZE;
760 1.1 jmmv KASSERT(oldpages == node->tn_aobj_pages);
761 1.1 jmmv newpages = round_page(newsize) / PAGE_SIZE;
762 1.1 jmmv
763 1.1 jmmv if (newpages > oldpages &&
764 1.1 jmmv newpages - oldpages > TMPFS_PAGES_AVAIL(tmp)) {
765 1.1 jmmv error = ENOSPC;
766 1.1 jmmv goto out;
767 1.1 jmmv }
768 1.1 jmmv
769 1.4 yamt node->tn_aobj_pages = newpages;
770 1.1 jmmv
771 1.1 jmmv tmp->tm_pages_used += (newpages - oldpages);
772 1.1 jmmv node->tn_size = newsize;
773 1.1 jmmv uvm_vnp_setsize(vp, newsize);
774 1.1 jmmv
775 1.1 jmmv error = 0;
776 1.1 jmmv
777 1.1 jmmv out:
778 1.1 jmmv return error;
779 1.1 jmmv }
780 1.1 jmmv
781 1.1 jmmv /* --------------------------------------------------------------------- */
782 1.1 jmmv
783 1.1 jmmv /* Returns information about the number of available memory pages,
784 1.1 jmmv * including physical and virtual ones.
785 1.1 jmmv *
786 1.1 jmmv * If 'total' is TRUE, the value returned is the total amount of memory
787 1.1 jmmv * pages configured for the system (either in use or free).
788 1.1 jmmv * If it is FALSE, the value returned is the amount of free memory pages.
789 1.1 jmmv *
790 1.1 jmmv * Remember to remove TMPFS_PAGES_RESERVED from the returned value to avoid
791 1.1 jmmv * excessive memory usage.
792 1.1 jmmv *
793 1.1 jmmv * XXX: This function is used every time TMPFS_PAGES_MAX is called to gather
794 1.1 jmmv * the amount of free memory, something that happens during _each_
795 1.1 jmmv * object allocation. The time it takes to run this function so many
796 1.1 jmmv * times is not negligible, so this value should be stored as an
797 1.1 jmmv * aggregate somewhere, possibly within UVM (we cannot do it ourselves
798 1.1 jmmv * because we can't get notifications on memory usage changes). */
799 1.1 jmmv size_t
800 1.1 jmmv tmpfs_mem_info(boolean_t total)
801 1.1 jmmv {
802 1.1 jmmv int i, sec;
803 1.1 jmmv register_t retval;
804 1.1 jmmv size_t size;
805 1.1 jmmv struct swapent *sep;
806 1.1 jmmv
807 1.1 jmmv sec = uvmexp.nswapdev;
808 1.1 jmmv sep = (struct swapent *)malloc(sizeof(struct swapent) * sec, M_TEMP,
809 1.1 jmmv M_WAITOK);
810 1.1 jmmv KASSERT(sep != NULL);
811 1.1 jmmv uvm_swap_stats(SWAP_STATS, sep, sec, &retval);
812 1.1 jmmv KASSERT(retval == sec);
813 1.1 jmmv
814 1.1 jmmv size = 0;
815 1.1 jmmv if (total) {
816 1.1 jmmv for (i = 0; i < sec; i++)
817 1.1 jmmv size += dbtob(sep[i].se_nblks) / PAGE_SIZE;
818 1.1 jmmv } else {
819 1.1 jmmv for (i = 0; i < sec; i++)
820 1.1 jmmv size += dbtob(sep[i].se_nblks - sep[i].se_inuse) /
821 1.1 jmmv PAGE_SIZE;
822 1.1 jmmv }
823 1.1 jmmv size += uvmexp.free;
824 1.1 jmmv
825 1.1 jmmv free(sep, M_TEMP);
826 1.1 jmmv
827 1.1 jmmv return size;
828 1.1 jmmv }
829 1.1 jmmv
830 1.1 jmmv /* --------------------------------------------------------------------- */
831 1.1 jmmv
/* Change flags of the given vnode to 'flags' on behalf of the credentials
 * in 'cred'.  Super-user may set any flag unless the file is already
 * protected and the securelevel forbids it; regular users may only touch
 * the user-settable flags of their own files.
 * Caller should execute VOP_UPDATE on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit. */
int
tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	int error;
	struct tmpfs_node *node;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* XXX: The following comes from UFS code, and can be found in
	 * several other file systems.  Shouldn't this be centralized
	 * somewhere? */
	/* Non-owners need super-user credentials to change flags. */
	if (cred->cr_uid != node->tn_uid &&
	    (error = suser(cred, &p->p_acflag)))
		return error;
	if (cred->cr_uid == 0) {
		/* The super-user is only allowed to change flags if the file
		 * wasn't protected before and the securelevel is zero. */
		if ((node->tn_flags & (SF_IMMUTABLE | SF_APPEND)) &&
		    securelevel > 0)
			return EPERM;
		node->tn_flags = flags;
	} else {
		/* Regular users can change flags provided they only want to
		 * change user-specific ones, not those reserved for the
		 * super-user. */
		if ((node->tn_flags & (SF_IMMUTABLE | SF_APPEND)) ||
		    (flags & UF_SETTABLE) != flags)
			return EPERM;
		if ((node->tn_flags & SF_SETTABLE) != (flags & SF_SETTABLE))
			return EPERM;
		/* Keep the super-user flags as they were; replace only the
		 * user-settable portion. */
		node->tn_flags &= SF_SETTABLE;
		node->tn_flags |= (flags & UF_SETTABLE);
	}

	node->tn_status |= TMPFS_NODE_CHANGED;
	VN_KNOTE(vp, NOTE_ATTRIB);

	KASSERT(VOP_ISLOCKED(vp));

	return 0;
}
882 1.1 jmmv
883 1.1 jmmv /* --------------------------------------------------------------------- */
884 1.1 jmmv
885 1.1 jmmv /* Change access mode on the given vnode.
886 1.1 jmmv * Caller should execute VOP_UPDATE on vp after a successful execution.
887 1.1 jmmv * The vnode must be locked on entry and remain locked on exit. */
888 1.1 jmmv int
889 1.1 jmmv tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct proc *p)
890 1.1 jmmv {
891 1.1 jmmv int error;
892 1.1 jmmv struct tmpfs_node *node;
893 1.1 jmmv
894 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
895 1.1 jmmv
896 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
897 1.1 jmmv
898 1.1 jmmv /* Disallow this operation if the file system is mounted read-only. */
899 1.1 jmmv if (vp->v_mount->mnt_flag & MNT_RDONLY)
900 1.1 jmmv return EROFS;
901 1.1 jmmv
902 1.1 jmmv /* Immutable or append-only files cannot be modified, either. */
903 1.1 jmmv if (node->tn_flags & (IMMUTABLE | APPEND))
904 1.1 jmmv return EPERM;
905 1.1 jmmv
906 1.1 jmmv /* XXX: The following comes from UFS code, and can be found in
907 1.1 jmmv * several other file systems. Shouldn't this be centralized
908 1.1 jmmv * somewhere? */
909 1.1 jmmv if (cred->cr_uid != node->tn_uid &&
910 1.1 jmmv (error = suser(cred, &p->p_acflag)))
911 1.1 jmmv return error;
912 1.1 jmmv if (cred->cr_uid != 0) {
913 1.1 jmmv if (vp->v_type != VDIR && (mode & S_ISTXT))
914 1.1 jmmv return EFTYPE;
915 1.1 jmmv
916 1.1 jmmv if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID))
917 1.1 jmmv return EPERM;
918 1.1 jmmv }
919 1.1 jmmv
920 1.1 jmmv node->tn_mode = (mode & ALLPERMS);
921 1.1 jmmv
922 1.1 jmmv node->tn_status |= TMPFS_NODE_CHANGED;
923 1.1 jmmv VN_KNOTE(vp, NOTE_ATTRIB);
924 1.1 jmmv
925 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
926 1.1 jmmv
927 1.1 jmmv return 0;
928 1.1 jmmv }
929 1.1 jmmv
930 1.1 jmmv /* --------------------------------------------------------------------- */
931 1.1 jmmv
932 1.1 jmmv /* Change ownership of the given vnode. At least one of uid or gid must
933 1.1 jmmv * be different than VNOVAL. If one is set to that value, the attribute
934 1.1 jmmv * is unchanged.
935 1.1 jmmv * Caller should execute VOP_UPDATE on vp after a successful execution.
936 1.1 jmmv * The vnode must be locked on entry and remain locked on exit. */
937 1.1 jmmv int
938 1.1 jmmv tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
939 1.1 jmmv struct proc *p)
940 1.1 jmmv {
941 1.1 jmmv int error;
942 1.1 jmmv struct tmpfs_node *node;
943 1.1 jmmv
944 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
945 1.1 jmmv
946 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
947 1.1 jmmv
948 1.1 jmmv /* Assign default values if they are unknown. */
949 1.1 jmmv KASSERT(uid != VNOVAL || gid != VNOVAL);
950 1.1 jmmv if (uid == VNOVAL)
951 1.1 jmmv uid = node->tn_uid;
952 1.1 jmmv if (gid == VNOVAL)
953 1.1 jmmv gid = node->tn_gid;
954 1.1 jmmv KASSERT(uid != VNOVAL && gid != VNOVAL);
955 1.1 jmmv
956 1.1 jmmv /* Disallow this operation if the file system is mounted read-only. */
957 1.1 jmmv if (vp->v_mount->mnt_flag & MNT_RDONLY)
958 1.1 jmmv return EROFS;
959 1.1 jmmv
960 1.1 jmmv /* Immutable or append-only files cannot be modified, either. */
961 1.1 jmmv if (node->tn_flags & (IMMUTABLE | APPEND))
962 1.1 jmmv return EPERM;
963 1.1 jmmv
964 1.1 jmmv /* XXX: The following comes from UFS code, and can be found in
965 1.1 jmmv * several other file systems. Shouldn't this be centralized
966 1.1 jmmv * somewhere? */
967 1.1 jmmv if ((cred->cr_uid != node->tn_uid || uid != node->tn_uid ||
968 1.1 jmmv (gid != node->tn_gid && !(cred->cr_gid == node->tn_gid ||
969 1.1 jmmv groupmember(gid, cred)))) &&
970 1.1 jmmv ((error = suser(cred, &p->p_acflag)) != 0))
971 1.1 jmmv return error;
972 1.1 jmmv
973 1.1 jmmv node->tn_uid = uid;
974 1.1 jmmv node->tn_gid = gid;
975 1.1 jmmv
976 1.1 jmmv node->tn_status |= TMPFS_NODE_CHANGED;
977 1.1 jmmv VN_KNOTE(vp, NOTE_ATTRIB);
978 1.1 jmmv
979 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
980 1.1 jmmv
981 1.1 jmmv return 0;
982 1.1 jmmv }
983 1.1 jmmv
984 1.1 jmmv /* --------------------------------------------------------------------- */
985 1.1 jmmv
986 1.1 jmmv /* Change size of the given vnode.
987 1.1 jmmv * Caller should execute VOP_UPDATE on vp after a successful execution.
988 1.1 jmmv * The vnode must be locked on entry and remain locked on exit. */
989 1.1 jmmv int
990 1.1 jmmv tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
991 1.1 jmmv struct proc *p)
992 1.1 jmmv {
993 1.1 jmmv int error;
994 1.1 jmmv struct tmpfs_node *node;
995 1.1 jmmv
996 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
997 1.1 jmmv
998 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
999 1.1 jmmv
1000 1.1 jmmv /* Decide whether this is a valid operation based on the file type. */
1001 1.1 jmmv error = 0;
1002 1.1 jmmv switch (vp->v_type) {
1003 1.1 jmmv case VDIR:
1004 1.1 jmmv return EISDIR;
1005 1.1 jmmv
1006 1.1 jmmv case VLNK:
1007 1.1 jmmv /* FALLTHROUGH */
1008 1.1 jmmv case VREG:
1009 1.1 jmmv if (vp->v_mount->mnt_flag & MNT_RDONLY)
1010 1.1 jmmv return EROFS;
1011 1.1 jmmv break;
1012 1.1 jmmv
1013 1.1 jmmv case VBLK:
1014 1.1 jmmv /* FALLTHROUGH */
1015 1.1 jmmv case VCHR:
1016 1.1 jmmv /* FALLTHROUGH */
1017 1.1 jmmv case VSOCK:
1018 1.1 jmmv /* FALLTHROUGH */
1019 1.1 jmmv case VFIFO:
1020 1.1 jmmv /* Allow modifications of special files even if in the file
1021 1.1 jmmv * system is mounted read-only (we are not modifying the
1022 1.1 jmmv * files themselves, but the objects they represent). */
1023 1.1 jmmv break;
1024 1.1 jmmv
1025 1.1 jmmv default:
1026 1.1 jmmv /* Anything else is unsupported. */
1027 1.1 jmmv return EINVAL;
1028 1.1 jmmv }
1029 1.1 jmmv
1030 1.1 jmmv /* Immutable or append-only files cannot be modified, either. */
1031 1.1 jmmv if (node->tn_flags & (IMMUTABLE | APPEND))
1032 1.1 jmmv return EPERM;
1033 1.1 jmmv
1034 1.1 jmmv error = VOP_TRUNCATE(vp, size, 0, cred, p);
1035 1.1 jmmv /* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
1036 1.1 jmmv * for us, as will update tn_status; no need to do that here. */
1037 1.1 jmmv
1038 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
1039 1.1 jmmv
1040 1.1 jmmv return error;
1041 1.1 jmmv }
1042 1.1 jmmv
1043 1.1 jmmv /* --------------------------------------------------------------------- */
1044 1.1 jmmv
1045 1.1 jmmv /* Change access and modification times of the given vnode.
1046 1.1 jmmv * Caller should execute VOP_UPDATE on vp after a successful execution.
1047 1.1 jmmv * The vnode must be locked on entry and remain locked on exit. */
1048 1.1 jmmv int
1049 1.1 jmmv tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
1050 1.1 jmmv int vaflags, struct ucred *cred, struct proc *p)
1051 1.1 jmmv {
1052 1.1 jmmv int error;
1053 1.1 jmmv struct tmpfs_node *node;
1054 1.1 jmmv
1055 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
1056 1.1 jmmv
1057 1.1 jmmv node = VP_TO_TMPFS_NODE(vp);
1058 1.1 jmmv
1059 1.1 jmmv /* Disallow this operation if the file system is mounted read-only. */
1060 1.1 jmmv if (vp->v_mount->mnt_flag & MNT_RDONLY)
1061 1.1 jmmv return EROFS;
1062 1.1 jmmv
1063 1.1 jmmv /* Immutable or append-only files cannot be modified, either. */
1064 1.1 jmmv if (node->tn_flags & (IMMUTABLE | APPEND))
1065 1.1 jmmv return EPERM;
1066 1.1 jmmv
1067 1.1 jmmv /* XXX: The following comes from UFS code, and can be found in
1068 1.1 jmmv * several other file systems. Shouldn't this be centralized
1069 1.1 jmmv * somewhere? */
1070 1.1 jmmv if (cred->cr_uid != node->tn_uid &&
1071 1.1 jmmv (error = suser(cred, &p->p_acflag)) &&
1072 1.1 jmmv ((vaflags & VA_UTIMES_NULL) == 0 ||
1073 1.1 jmmv (error = VOP_ACCESS(vp, VWRITE, cred, p))))
1074 1.1 jmmv return error;
1075 1.1 jmmv
1076 1.1 jmmv if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
1077 1.1 jmmv node->tn_status |= TMPFS_NODE_ACCESSED;
1078 1.1 jmmv
1079 1.1 jmmv if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
1080 1.1 jmmv node->tn_status |= TMPFS_NODE_MODIFIED;
1081 1.1 jmmv
1082 1.1 jmmv error = VOP_UPDATE(vp, atime, mtime, 0);
1083 1.1 jmmv
1084 1.1 jmmv KASSERT(VOP_ISLOCKED(vp));
1085 1.1 jmmv
1086 1.1 jmmv return error;
1087 1.1 jmmv }
1088