/* $NetBSD: tmpfs.h,v 1.20.4.4 2007/02/26 09:11:00 yamt Exp $ */
2 1.20.4.2 yamt
3 1.20.4.2 yamt /*
4 1.20.4.2 yamt * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
5 1.20.4.2 yamt * All rights reserved.
6 1.20.4.2 yamt *
7 1.20.4.2 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.20.4.2 yamt * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9 1.20.4.2 yamt * 2005 program.
10 1.20.4.2 yamt *
11 1.20.4.2 yamt * Redistribution and use in source and binary forms, with or without
12 1.20.4.2 yamt * modification, are permitted provided that the following conditions
13 1.20.4.2 yamt * are met:
14 1.20.4.2 yamt * 1. Redistributions of source code must retain the above copyright
15 1.20.4.2 yamt * notice, this list of conditions and the following disclaimer.
16 1.20.4.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
17 1.20.4.2 yamt * notice, this list of conditions and the following disclaimer in the
18 1.20.4.2 yamt * documentation and/or other materials provided with the distribution.
19 1.20.4.2 yamt * 3. All advertising materials mentioning features or use of this software
20 1.20.4.2 yamt * must display the following acknowledgement:
21 1.20.4.2 yamt * This product includes software developed by the NetBSD
22 1.20.4.2 yamt * Foundation, Inc. and its contributors.
23 1.20.4.2 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.20.4.2 yamt * contributors may be used to endorse or promote products derived
25 1.20.4.2 yamt * from this software without specific prior written permission.
26 1.20.4.2 yamt *
27 1.20.4.2 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.20.4.2 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.20.4.2 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.20.4.2 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.20.4.2 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.20.4.2 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.20.4.2 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.20.4.2 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.20.4.2 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.20.4.2 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.20.4.2 yamt * POSSIBILITY OF SUCH DAMAGE.
38 1.20.4.2 yamt */
39 1.20.4.2 yamt
40 1.20.4.2 yamt #ifndef _FS_TMPFS_TMPFS_H_
41 1.20.4.2 yamt #define _FS_TMPFS_TMPFS_H_
42 1.20.4.2 yamt
43 1.20.4.2 yamt /* ---------------------------------------------------------------------
44 1.20.4.2 yamt * KERNEL-SPECIFIC DEFINITIONS
45 1.20.4.2 yamt * --------------------------------------------------------------------- */
46 1.20.4.2 yamt #include <sys/dirent.h>
47 1.20.4.2 yamt #include <sys/mount.h>
48 1.20.4.2 yamt #include <sys/queue.h>
49 1.20.4.2 yamt #include <sys/vnode.h>
50 1.20.4.2 yamt
51 1.20.4.2 yamt #if defined(_KERNEL)
52 1.20.4.2 yamt #include <fs/tmpfs/tmpfs_pool.h>
53 1.20.4.2 yamt #endif /* defined(_KERNEL) */
54 1.20.4.2 yamt
55 1.20.4.2 yamt /* --------------------------------------------------------------------- */
56 1.20.4.2 yamt
/*
 * Internal representation of a tmpfs directory entry.
 */
struct tmpfs_dirent {
	/* Linkage into the containing directory's tail queue. */
	TAILQ_ENTRY(tmpfs_dirent)	td_entries;

	/* Length of the name stored in this directory entry; kept here
	 * so it never has to be recalculated when the name is used. */
	uint16_t			td_namelen;

	/* The name of the entry, allocated from a string pool.  It is
	 * NOT guaranteed to be zero-terminated, so td_namelen must
	 * always be honored when accessing it. */
	char *				td_name;

	/* Node this entry refers to. */
	struct tmpfs_node *		td_node;
};
75 1.20.4.2 yamt
/* A directory in tmpfs holds a sorted list of directory entries, which in
 * turn point to other files (which can be directories themselves).
 *
 * In tmpfs, this list is managed by a tail queue, whose head is defined by
 * the struct tmpfs_dir type.
 *
 * It is important to notice that directories do not have entries for . and
 * .. as other file systems do.  These can be generated on request from
 * information available by other means: the pointer to the node itself in
 * the former case and the pointer to the parent directory in the latter.
 * This simplifies tmpfs's code and, more importantly, removes redundancy. */
TAILQ_HEAD(tmpfs_dir, tmpfs_dirent);
89 1.20.4.2 yamt
90 1.20.4.3 yamt /* Each entry in a directory has a cookie that identifies it. Cookies
91 1.20.4.3 yamt * supersede offsets within directories because, given how tmpfs stores
92 1.20.4.3 yamt * directories in memory, there is no such thing as an offset. (Emulating
93 1.20.4.3 yamt * a real offset could be very difficult.)
94 1.20.4.3 yamt *
 * The '.', '..' and the end of directory markers have fixed cookies, which
 * cannot collide with the cookies generated by other entries.  The cookies
 * for the other entries are generated based on the memory address where
 * their information is stored.
99 1.20.4.3 yamt *
100 1.20.4.3 yamt * Ideally, using the entry's memory pointer as the cookie would be enough
101 1.20.4.3 yamt * to represent it and it wouldn't cause collisions in any system.
102 1.20.4.3 yamt * Unfortunately, this results in "offsets" with very large values which
103 1.20.4.3 yamt * later raise problems in the Linux compatibility layer (and maybe in other
104 1.20.4.3 yamt * places) as described in PR kern/32034. Hence we need to workaround this
105 1.20.4.3 yamt * with a rather ugly hack.
106 1.20.4.3 yamt *
107 1.20.4.3 yamt * Linux 32-bit binaries, unless built with _FILE_OFFSET_BITS=64, have off_t
108 1.20.4.3 yamt * set to 'long', which is a 32-bit *signed* long integer. Regardless of
109 1.20.4.3 yamt * the macro value, GLIBC (2.3 at least) always uses the getdents64
110 1.20.4.3 yamt * system call (when calling readdir) which internally returns off64_t
111 1.20.4.3 yamt * offsets. In order to make 32-bit binaries work, *GLIBC* converts the
112 1.20.4.3 yamt * 64-bit values returned by the kernel to 32-bit ones and aborts with
113 1.20.4.3 yamt * EOVERFLOW if the conversion results in values that won't fit in 32-bit
114 1.20.4.3 yamt * integers (which it assumes is because the directory is extremely large).
115 1.20.4.3 yamt * This wouldn't cause problems if we were dealing with unsigned integers,
116 1.20.4.3 yamt * but as we have signed integers, this check fails due to sign expansion.
117 1.20.4.3 yamt *
118 1.20.4.3 yamt * For example, consider that the kernel returns the 0xc1234567 cookie to
119 1.20.4.3 yamt * userspace in a off64_t integer. Later on, GLIBC casts this value to
120 1.20.4.3 yamt * off_t (remember, signed) with code similar to:
121 1.20.4.3 yamt * system call returns the offset in kernel_value;
122 1.20.4.3 yamt * off_t casted_value = kernel_value;
123 1.20.4.3 yamt * if (sizeof(off_t) != sizeof(off64_t) &&
124 1.20.4.3 yamt * kernel_value != casted_value)
125 1.20.4.3 yamt * error!
126 1.20.4.3 yamt * In this case, casted_value still has 0xc1234567, but when it is compared
127 1.20.4.3 yamt * for equality against kernel_value, it is promoted to a 64-bit integer and
128 1.20.4.3 yamt * becomes 0xffffffffc1234567, which is different than 0x00000000c1234567.
129 1.20.4.3 yamt * Then, GLIBC assumes this is because the directory is very large.
130 1.20.4.3 yamt *
131 1.20.4.3 yamt * Given that all the above happens in user-space, we have no control over
132 1.20.4.3 yamt * it; therefore we must workaround the issue here. We do this by
133 1.20.4.3 yamt * truncating the pointer value to a 32-bit integer and hope that there
134 1.20.4.3 yamt * won't be collisions. In fact, this will not cause any problems in
135 1.20.4.3 yamt * 32-bit platforms but some might arise in 64-bit machines (I'm not sure
136 1.20.4.3 yamt * if they can happen at all in practice).
137 1.20.4.3 yamt *
138 1.20.4.3 yamt * XXX A nicer solution shall be attempted. */
#if defined(_KERNEL)
/* Fixed cookies for the synthetic '.', '..' and EOF markers. */
#define	TMPFS_DIRCOOKIE_DOT	0
#define	TMPFS_DIRCOOKIE_DOTDOT	1
#define	TMPFS_DIRCOOKIE_EOF	2

/*
 * Derive the readdir cookie for the entry 'de' from its memory address,
 * truncated to 31 bits so it survives the 32-bit off_t conversion done by
 * Linux userland (see the long comment above).  The result must never
 * collide with the fixed cookies defined above.
 */
static __inline off_t
tmpfs_dircookie(struct tmpfs_dirent *de)
{
	const off_t cookie = ((off_t)(uintptr_t)de >> 1) & 0x7FFFFFFF;

	KASSERT(cookie != TMPFS_DIRCOOKIE_DOT);
	KASSERT(cookie != TMPFS_DIRCOOKIE_DOTDOT);
	KASSERT(cookie != TMPFS_DIRCOOKIE_EOF);

	return cookie;
}
#endif /* defined(_KERNEL) */
157 1.20.4.2 yamt
158 1.20.4.2 yamt /* --------------------------------------------------------------------- */
159 1.20.4.2 yamt
160 1.20.4.2 yamt /*
161 1.20.4.2 yamt * Internal representation of a tmpfs file system node.
162 1.20.4.2 yamt *
163 1.20.4.2 yamt * This structure is splitted in two parts: one holds attributes common
164 1.20.4.2 yamt * to all file types and the other holds data that is only applicable to
165 1.20.4.2 yamt * a particular type. The code must be careful to only access those
166 1.20.4.2 yamt * attributes that are actually allowed by the node's type.
167 1.20.4.2 yamt */
168 1.20.4.2 yamt struct tmpfs_node {
169 1.20.4.2 yamt /* Doubly-linked list entry which links all existing nodes for a
170 1.20.4.2 yamt * single file system. This is provided to ease the removal of
171 1.20.4.2 yamt * all nodes during the unmount operation. */
172 1.20.4.2 yamt LIST_ENTRY(tmpfs_node) tn_entries;
173 1.20.4.2 yamt
174 1.20.4.2 yamt /* The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
175 1.20.4.2 yamt * 'VLNK', 'VREG' and 'VSOCK' is allowed. The usage of vnode
176 1.20.4.2 yamt * types instead of a custom enumeration is to make things simpler
177 1.20.4.2 yamt * and faster, as we do not need to convert between two types. */
178 1.20.4.2 yamt enum vtype tn_type;
179 1.20.4.2 yamt
180 1.20.4.2 yamt /* Node identifier. */
181 1.20.4.2 yamt ino_t tn_id;
182 1.20.4.2 yamt
183 1.20.4.2 yamt /* Node's internal status. This is used by several file system
184 1.20.4.2 yamt * operations to do modifications to the node in a delayed
185 1.20.4.2 yamt * fashion. */
186 1.20.4.2 yamt int tn_status;
187 1.20.4.2 yamt #define TMPFS_NODE_ACCESSED (1 << 1)
188 1.20.4.2 yamt #define TMPFS_NODE_MODIFIED (1 << 2)
189 1.20.4.2 yamt #define TMPFS_NODE_CHANGED (1 << 3)
190 1.20.4.2 yamt
191 1.20.4.2 yamt /* The node size. It does not necessarily match the real amount
192 1.20.4.2 yamt * of memory consumed by it. */
193 1.20.4.2 yamt off_t tn_size;
194 1.20.4.2 yamt
195 1.20.4.2 yamt /* Generic node attributes. */
196 1.20.4.2 yamt uid_t tn_uid;
197 1.20.4.2 yamt gid_t tn_gid;
198 1.20.4.2 yamt mode_t tn_mode;
199 1.20.4.2 yamt int tn_flags;
200 1.20.4.2 yamt nlink_t tn_links;
201 1.20.4.2 yamt struct timespec tn_atime;
202 1.20.4.2 yamt struct timespec tn_mtime;
203 1.20.4.2 yamt struct timespec tn_ctime;
204 1.20.4.2 yamt struct timespec tn_birthtime;
205 1.20.4.2 yamt unsigned long tn_gen;
206 1.20.4.2 yamt
207 1.20.4.2 yamt /* Head of byte-level lock list (used by tmpfs_advlock). */
208 1.20.4.2 yamt struct lockf * tn_lockf;
209 1.20.4.2 yamt
210 1.20.4.2 yamt /* As there is a single vnode for each active file within the
211 1.20.4.2 yamt * system, care has to be taken to avoid allocating more than one
212 1.20.4.2 yamt * vnode per file. In order to do this, a bidirectional association
213 1.20.4.2 yamt * is kept between vnodes and nodes.
214 1.20.4.2 yamt *
215 1.20.4.2 yamt * Whenever a vnode is allocated, its v_data field is updated to
216 1.20.4.2 yamt * point to the node it references. At the same time, the node's
217 1.20.4.2 yamt * tn_vnode field is modified to point to the new vnode representing
218 1.20.4.2 yamt * it. Further attempts to allocate a vnode for this same node will
219 1.20.4.2 yamt * result in returning a new reference to the value stored in
220 1.20.4.2 yamt * tn_vnode.
221 1.20.4.2 yamt *
222 1.20.4.2 yamt * May be NULL when the node is unused (that is, no vnode has been
223 1.20.4.2 yamt * allocated for it or it has been reclaimed). */
224 1.20.4.2 yamt struct vnode * tn_vnode;
225 1.20.4.2 yamt
226 1.20.4.2 yamt /* Pointer to the node returned by tmpfs_lookup() after doing a
227 1.20.4.2 yamt * delete or a rename lookup; its value is only valid in these two
228 1.20.4.2 yamt * situations. In case we were looking up . or .., it holds a null
229 1.20.4.2 yamt * pointer. */
230 1.20.4.2 yamt struct tmpfs_dirent * tn_lookup_dirent;
231 1.20.4.2 yamt
232 1.20.4.2 yamt union {
233 1.20.4.2 yamt /* Valid when tn_type == VBLK || tn_type == VCHR. */
234 1.20.4.2 yamt struct {
235 1.20.4.2 yamt dev_t tn_rdev;
236 1.20.4.2 yamt } tn_dev;
237 1.20.4.2 yamt
238 1.20.4.2 yamt /* Valid when tn_type == VDIR. */
239 1.20.4.2 yamt struct {
240 1.20.4.2 yamt /* Pointer to the parent directory. The root
241 1.20.4.2 yamt * directory has a pointer to itself in this field;
242 1.20.4.2 yamt * this property identifies the root node. */
243 1.20.4.2 yamt struct tmpfs_node * tn_parent;
244 1.20.4.2 yamt
245 1.20.4.2 yamt /* Head of a tail-queue that links the contents of
246 1.20.4.2 yamt * the directory together. See above for a
247 1.20.4.2 yamt * description of its contents. */
248 1.20.4.2 yamt struct tmpfs_dir tn_dir;
249 1.20.4.2 yamt
250 1.20.4.2 yamt /* Number and pointer of the first directory entry
251 1.20.4.2 yamt * returned by the readdir operation if it were
252 1.20.4.2 yamt * called again to continue reading data from the
253 1.20.4.2 yamt * same directory as before. This is used to speed
254 1.20.4.2 yamt * up reads of long directories, assuming that no
255 1.20.4.2 yamt * more than one read is in progress at a given time.
256 1.20.4.2 yamt * Otherwise, these values are discarded and a linear
257 1.20.4.2 yamt * scan is performed from the beginning up to the
258 1.20.4.2 yamt * point where readdir starts returning values. */
259 1.20.4.2 yamt off_t tn_readdir_lastn;
260 1.20.4.2 yamt struct tmpfs_dirent * tn_readdir_lastp;
261 1.20.4.2 yamt } tn_dir;
262 1.20.4.2 yamt
263 1.20.4.2 yamt /* Valid when tn_type == VLNK. */
264 1.20.4.2 yamt struct tn_lnk {
265 1.20.4.2 yamt /* The link's target, allocated from a string pool. */
266 1.20.4.2 yamt char * tn_link;
267 1.20.4.2 yamt } tn_lnk;
268 1.20.4.2 yamt
269 1.20.4.2 yamt /* Valid when tn_type == VREG. */
270 1.20.4.2 yamt struct tn_reg {
271 1.20.4.2 yamt /* The contents of regular files stored in a tmpfs
272 1.20.4.2 yamt * file system are represented by a single anonymous
273 1.20.4.2 yamt * memory object (aobj, for short). The aobj provides
274 1.20.4.2 yamt * direct access to any position within the file,
275 1.20.4.2 yamt * because its contents are always mapped in a
276 1.20.4.2 yamt * contiguous region of virtual memory. It is a task
277 1.20.4.2 yamt * of the memory management subsystem (see uvm(9)) to
278 1.20.4.2 yamt * issue the required page ins or page outs whenever
279 1.20.4.2 yamt * a position within the file is accessed. */
280 1.20.4.2 yamt struct uvm_object * tn_aobj;
281 1.20.4.2 yamt size_t tn_aobj_pages;
282 1.20.4.2 yamt } tn_reg;
283 1.20.4.2 yamt } tn_spec;
284 1.20.4.2 yamt };
285 1.20.4.2 yamt
286 1.20.4.2 yamt #if defined(_KERNEL)
287 1.20.4.2 yamt
288 1.20.4.2 yamt LIST_HEAD(tmpfs_node_list, tmpfs_node);
289 1.20.4.2 yamt
290 1.20.4.2 yamt /* --------------------------------------------------------------------- */
291 1.20.4.2 yamt
292 1.20.4.2 yamt /*
293 1.20.4.2 yamt * Internal representation of a tmpfs mount point.
294 1.20.4.2 yamt */
295 1.20.4.2 yamt struct tmpfs_mount {
296 1.20.4.2 yamt /* Maximum number of memory pages available for use by the file
297 1.20.4.2 yamt * system, set during mount time. This variable must never be
298 1.20.4.3 yamt * used directly as it may be bigger than the current amount of
299 1.20.4.2 yamt * free memory; in the extreme case, it will hold the SIZE_MAX
300 1.20.4.2 yamt * value. Instead, use the TMPFS_PAGES_MAX macro. */
301 1.20.4.2 yamt size_t tm_pages_max;
302 1.20.4.2 yamt
303 1.20.4.2 yamt /* Number of pages in use by the file system. Cannot be bigger
304 1.20.4.2 yamt * than the value returned by TMPFS_PAGES_MAX in any case. */
305 1.20.4.2 yamt size_t tm_pages_used;
306 1.20.4.2 yamt
307 1.20.4.2 yamt /* Pointer to the node representing the root directory of this
308 1.20.4.2 yamt * file system. */
309 1.20.4.2 yamt struct tmpfs_node * tm_root;
310 1.20.4.2 yamt
311 1.20.4.2 yamt /* Maximum number of possible nodes for this file system; set
312 1.20.4.2 yamt * during mount time. We need a hard limit on the maximum number
313 1.20.4.2 yamt * of nodes to avoid allocating too much of them; their objects
314 1.20.4.2 yamt * cannot be released until the file system is unmounted.
315 1.20.4.2 yamt * Otherwise, we could easily run out of memory by creating lots
316 1.20.4.2 yamt * of empty files and then simply removing them. */
317 1.20.4.2 yamt ino_t tm_nodes_max;
318 1.20.4.2 yamt
319 1.20.4.2 yamt /* Number of nodes currently allocated. This number only grows.
320 1.20.4.2 yamt * When it reaches tm_nodes_max, no more new nodes can be allocated.
321 1.20.4.2 yamt * Of course, the old, unused ones can be reused. */
322 1.20.4.2 yamt ino_t tm_nodes_last;
323 1.20.4.2 yamt
324 1.20.4.2 yamt /* Nodes are organized in two different lists. The used list
325 1.20.4.2 yamt * contains all nodes that are currently used by the file system;
326 1.20.4.2 yamt * i.e., they refer to existing files. The available list contains
327 1.20.4.2 yamt * all nodes that are currently available for use by new files.
328 1.20.4.2 yamt * Nodes must be kept in this list (instead of deleting them)
329 1.20.4.2 yamt * because we need to keep track of their generation number (tn_gen
330 1.20.4.2 yamt * field).
331 1.20.4.2 yamt *
332 1.20.4.2 yamt * Note that nodes are lazily allocated: if the available list is
333 1.20.4.2 yamt * empty and we have enough space to create more nodes, they will be
334 1.20.4.2 yamt * created and inserted in the used list. Once these are released,
335 1.20.4.2 yamt * they will go into the available list, remaining alive until the
336 1.20.4.2 yamt * file system is unmounted. */
337 1.20.4.2 yamt struct tmpfs_node_list tm_nodes_used;
338 1.20.4.2 yamt struct tmpfs_node_list tm_nodes_avail;
339 1.20.4.2 yamt
340 1.20.4.2 yamt /* Pools used to store file system meta data. These are not shared
341 1.20.4.2 yamt * across several instances of tmpfs for the reasons described in
342 1.20.4.2 yamt * tmpfs_pool.c. */
343 1.20.4.2 yamt struct tmpfs_pool tm_dirent_pool;
344 1.20.4.2 yamt struct tmpfs_pool tm_node_pool;
345 1.20.4.2 yamt struct tmpfs_str_pool tm_str_pool;
346 1.20.4.2 yamt };
347 1.20.4.2 yamt
348 1.20.4.2 yamt /* --------------------------------------------------------------------- */
349 1.20.4.2 yamt
/*
 * This structure maps a file identifier to a tmpfs node.  Used by the
 * NFS code.
 */
struct tmpfs_fid {
	uint16_t	tf_len;		/* presumably the fid length — TODO confirm against fid(9) */
	uint16_t	tf_pad;		/* explicit padding */
	uint32_t	tf_gen;		/* presumably the node's tn_gen — TODO confirm */
	ino_t		tf_id;		/* node identifier */
};
360 1.20.4.2 yamt
361 1.20.4.2 yamt /* --------------------------------------------------------------------- */
362 1.20.4.2 yamt
363 1.20.4.2 yamt /*
364 1.20.4.2 yamt * Prototypes for tmpfs_subr.c.
365 1.20.4.2 yamt */
366 1.20.4.2 yamt
367 1.20.4.2 yamt int tmpfs_alloc_node(struct tmpfs_mount *, enum vtype,
368 1.20.4.2 yamt uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *,
369 1.20.4.2 yamt char *, dev_t, struct proc *, struct tmpfs_node **);
370 1.20.4.2 yamt void tmpfs_free_node(struct tmpfs_mount *, struct tmpfs_node *);
371 1.20.4.2 yamt int tmpfs_alloc_dirent(struct tmpfs_mount *, struct tmpfs_node *,
372 1.20.4.2 yamt const char *, uint16_t, struct tmpfs_dirent **);
373 1.20.4.2 yamt void tmpfs_free_dirent(struct tmpfs_mount *, struct tmpfs_dirent *,
374 1.20.4.4 yamt bool);
375 1.20.4.2 yamt int tmpfs_alloc_vp(struct mount *, struct tmpfs_node *, struct vnode **);
376 1.20.4.2 yamt void tmpfs_free_vp(struct vnode *);
377 1.20.4.2 yamt int tmpfs_alloc_file(struct vnode *, struct vnode **, struct vattr *,
378 1.20.4.2 yamt struct componentname *, char *);
379 1.20.4.2 yamt void tmpfs_dir_attach(struct vnode *, struct tmpfs_dirent *);
380 1.20.4.2 yamt void tmpfs_dir_detach(struct vnode *, struct tmpfs_dirent *);
381 1.20.4.2 yamt struct tmpfs_dirent * tmpfs_dir_lookup(struct tmpfs_node *node,
382 1.20.4.2 yamt struct componentname *cnp);
383 1.20.4.2 yamt int tmpfs_dir_getdotdent(struct tmpfs_node *, struct uio *);
384 1.20.4.2 yamt int tmpfs_dir_getdotdotdent(struct tmpfs_node *, struct uio *);
385 1.20.4.2 yamt struct tmpfs_dirent * tmpfs_dir_lookupbycookie(struct tmpfs_node *, off_t);
386 1.20.4.2 yamt int tmpfs_dir_getdents(struct tmpfs_node *, struct uio *, off_t *);
387 1.20.4.2 yamt int tmpfs_reg_resize(struct vnode *, off_t);
388 1.20.4.4 yamt size_t tmpfs_mem_info(bool);
389 1.20.4.3 yamt int tmpfs_chflags(struct vnode *, int, kauth_cred_t, struct lwp *);
390 1.20.4.3 yamt int tmpfs_chmod(struct vnode *, mode_t, kauth_cred_t, struct lwp *);
391 1.20.4.3 yamt int tmpfs_chown(struct vnode *, uid_t, gid_t, kauth_cred_t, struct lwp *);
392 1.20.4.3 yamt int tmpfs_chsize(struct vnode *, u_quad_t, kauth_cred_t, struct lwp *);
393 1.20.4.2 yamt int tmpfs_chtimes(struct vnode *, struct timespec *, struct timespec *,
394 1.20.4.2 yamt int, kauth_cred_t, struct lwp *);
395 1.20.4.2 yamt void tmpfs_itimes(struct vnode *, const struct timespec *,
396 1.20.4.2 yamt const struct timespec *);
397 1.20.4.2 yamt
398 1.20.4.2 yamt void tmpfs_update(struct vnode *, const struct timespec *,
399 1.20.4.2 yamt const struct timespec *, int);
400 1.20.4.2 yamt int tmpfs_truncate(struct vnode *, off_t);
401 1.20.4.2 yamt
402 1.20.4.2 yamt /* --------------------------------------------------------------------- */
403 1.20.4.2 yamt
/*
 * Convenience macros to simplify some logical expressions.
 *
 * IMPLIES(a, b) is true unless 'a' holds and 'b' does not;
 * IFF(a, b) is true when 'a' and 'b' have the same truth value.
 */
#define	IMPLIES(a, b)	(!(a) || (b))
#define	IFF(a, b)	(IMPLIES(a, b) && IMPLIES(b, a))
409 1.20.4.2 yamt
410 1.20.4.2 yamt /* --------------------------------------------------------------------- */
411 1.20.4.2 yamt
/*
 * Checks that the directory entry pointed to by 'de' matches the name
 * 'name' with a length of 'len'.
 *
 * Every macro argument is fully parenthesized (CERT PRE01-C): the
 * original expansion used bare 'de' and 'len', which broke for argument
 * expressions such as '&dirent' or 'n + 1'.
 */
#define	TMPFS_DIRENT_MATCHES(de, name, len) \
    ((de)->td_namelen == (uint16_t)(len) && \
    memcmp((de)->td_name, (name), (de)->td_namelen) == 0)
419 1.20.4.2 yamt
420 1.20.4.2 yamt /* --------------------------------------------------------------------- */
421 1.20.4.2 yamt
/*
 * Ensures that the node pointed to by 'node' is a directory and that its
 * contents are consistent with respect to directories.
 *
 * Wrapped in do/while (CERT PRE10-C) so the macro behaves as a single
 * statement; the original expanded to three statements, which silently
 * misbehaved when used in an unbraced if/else body.  Callers still invoke
 * it as 'TMPFS_VALIDATE_DIR(node);'.
 */
#define	TMPFS_VALIDATE_DIR(node) \
	do { \
		KASSERT((node)->tn_type == VDIR); \
		KASSERT((node)->tn_size % sizeof(struct tmpfs_dirent) == 0); \
		KASSERT((node)->tn_spec.tn_dir.tn_readdir_lastp == NULL || \
		    tmpfs_dircookie((node)->tn_spec.tn_dir.tn_readdir_lastp) == \
		    (node)->tn_spec.tn_dir.tn_readdir_lastn); \
	} while (/*CONSTCOND*/ 0)
432 1.20.4.2 yamt
433 1.20.4.2 yamt /* --------------------------------------------------------------------- */
434 1.20.4.2 yamt
435 1.20.4.2 yamt /*
436 1.20.4.2 yamt * Memory management stuff.
437 1.20.4.2 yamt */
438 1.20.4.2 yamt
439 1.20.4.2 yamt /* Amount of memory pages to reserve for the system (e.g., to not use by
440 1.20.4.2 yamt * tmpfs).
441 1.20.4.2 yamt * XXX: Should this be tunable through sysctl, for instance? */
442 1.20.4.2 yamt #define TMPFS_PAGES_RESERVED (4 * 1024 * 1024 / PAGE_SIZE)
443 1.20.4.2 yamt
444 1.20.4.2 yamt /* Returns the maximum size allowed for a tmpfs file system. This macro
445 1.20.4.2 yamt * must be used instead of directly retrieving the value from tm_pages_max.
446 1.20.4.2 yamt * The reason is that the size of a tmpfs file system is dynamic: it lets
447 1.20.4.2 yamt * the user store files as long as there is enough free memory (including
448 1.20.4.2 yamt * physical memory and swap space). Therefore, the amount of memory to be
449 1.20.4.2 yamt * used is either the limit imposed by the user during mount time or the
450 1.20.4.2 yamt * amount of available memory, whichever is lower. To avoid consuming all
451 1.20.4.2 yamt * the memory for a given mount point, the system will always reserve a
452 1.20.4.2 yamt * minimum of TMPFS_PAGES_RESERVED pages, which is also taken into account
453 1.20.4.2 yamt * by this macro (see above). */
454 1.20.4.2 yamt static __inline size_t
455 1.20.4.2 yamt TMPFS_PAGES_MAX(struct tmpfs_mount *tmp)
456 1.20.4.2 yamt {
457 1.20.4.2 yamt size_t freepages;
458 1.20.4.2 yamt
459 1.20.4.4 yamt freepages = tmpfs_mem_info(false);
460 1.20.4.2 yamt if (freepages < TMPFS_PAGES_RESERVED)
461 1.20.4.2 yamt freepages = 0;
462 1.20.4.2 yamt else
463 1.20.4.2 yamt freepages -= TMPFS_PAGES_RESERVED;
464 1.20.4.2 yamt
465 1.20.4.2 yamt return MIN(tmp->tm_pages_max, freepages + tmp->tm_pages_used);
466 1.20.4.2 yamt }
467 1.20.4.2 yamt
468 1.20.4.2 yamt /* Returns the available space for the given file system. */
469 1.20.4.2 yamt #define TMPFS_PAGES_AVAIL(tmp) (TMPFS_PAGES_MAX(tmp) - (tmp)->tm_pages_used)
470 1.20.4.2 yamt
471 1.20.4.2 yamt /* --------------------------------------------------------------------- */
472 1.20.4.2 yamt
473 1.20.4.2 yamt /*
474 1.20.4.2 yamt * Macros/functions to convert from generic data structures to tmpfs
475 1.20.4.2 yamt * specific ones.
476 1.20.4.2 yamt */
477 1.20.4.2 yamt
478 1.20.4.2 yamt static __inline
479 1.20.4.2 yamt struct tmpfs_mount *
480 1.20.4.2 yamt VFS_TO_TMPFS(struct mount *mp)
481 1.20.4.2 yamt {
482 1.20.4.2 yamt struct tmpfs_mount *tmp;
483 1.20.4.2 yamt
484 1.20.4.2 yamt #ifdef KASSERT
485 1.20.4.2 yamt KASSERT((mp) != NULL && (mp)->mnt_data != NULL);
486 1.20.4.2 yamt #endif
487 1.20.4.2 yamt tmp = (struct tmpfs_mount *)(mp)->mnt_data;
488 1.20.4.2 yamt return tmp;
489 1.20.4.2 yamt }
490 1.20.4.2 yamt
491 1.20.4.2 yamt #endif /* defined(_KERNEL) */
492 1.20.4.2 yamt
493 1.20.4.2 yamt static __inline
494 1.20.4.2 yamt struct tmpfs_node *
495 1.20.4.2 yamt VP_TO_TMPFS_NODE(struct vnode *vp)
496 1.20.4.2 yamt {
497 1.20.4.2 yamt struct tmpfs_node *node;
498 1.20.4.2 yamt
499 1.20.4.2 yamt #ifdef KASSERT
500 1.20.4.2 yamt KASSERT((vp) != NULL && (vp)->v_data != NULL);
501 1.20.4.2 yamt #endif
502 1.20.4.2 yamt node = (struct tmpfs_node *)vp->v_data;
503 1.20.4.2 yamt return node;
504 1.20.4.2 yamt }
505 1.20.4.2 yamt
506 1.20.4.2 yamt #if defined(_KERNEL)
507 1.20.4.2 yamt
/* Returns the tmpfs node for vnode 'vp', asserting (in diagnostic
 * kernels) that it is a consistent directory. */
static __inline struct tmpfs_node *
VP_TO_TMPFS_DIR(struct vnode *vp)
{
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);

#ifdef KASSERT
	TMPFS_VALIDATE_DIR(node);
#endif
	return node;
}
520 1.20.4.2 yamt
521 1.20.4.2 yamt #endif /* defined(_KERNEL) */
522 1.20.4.2 yamt
523 1.20.4.2 yamt /* ---------------------------------------------------------------------
524 1.20.4.2 yamt * USER AND KERNEL DEFINITIONS
525 1.20.4.2 yamt * --------------------------------------------------------------------- */
526 1.20.4.2 yamt
/*
 * This structure is used to communicate mount parameters between userland
 * and kernel space.
 */
#define	TMPFS_ARGS_VERSION	1
struct tmpfs_args {
	int		ta_version;	/* Set to TMPFS_ARGS_VERSION. */

	/* Size counters. */
	ino_t		ta_nodes_max;
	off_t		ta_size_max;

	/* Root node attributes. */
	uid_t		ta_root_uid;
	gid_t		ta_root_gid;
	mode_t		ta_root_mode;
};
544 1.20.4.2 yamt #endif /* _FS_TMPFS_TMPFS_H_ */
545