/*	$NetBSD: ulfs_inode.c,v 1.9.4.2 2014/05/22 11:41:19 yamt Exp $	*/
/*  from NetBSD: ufs_inode.c,v 1.89 2013/01/22 09:39:18 dholland Exp  */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_inode.c	8.9 (Berkeley) 5/14/95
 */
39 1.9.4.2 yamt
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.9.4.2 2014/05/22 11:41:19 yamt Exp $");

#if defined(_KERNEL_OPT)
#include "opt_lfs.h"
#include "opt_quota.h"
#include "opt_wapbl.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>

#include <ufs/lfs/lfs_extern.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#ifdef LFS_DIRHASH
#include <ufs/lfs/ulfs_dirhash.h>
#endif
#ifdef LFS_EXTATTR
#include <ufs/lfs/ulfs_extattr.h>
#endif

#include <uvm/uvm.h>

/* Nonzero => note reclaims of active vnodes; defined elsewhere in the kernel. */
extern int prtactive;
76 1.9.4.2 yamt
77 1.9.4.2 yamt /*
78 1.9.4.2 yamt * Last reference to an inode. If necessary, write or delete it.
79 1.9.4.2 yamt */
/*
 * ulfs_inactive: last reference to an inode has been dropped; write or
 * delete it as needed.
 *
 * If the link count has gone to zero on a writable filesystem, the
 * file's data is freed (truncate to zero), its quota allocation is
 * returned, and i_mode is cleared to mark the inode free; the final
 * on-disk inode free/update is deferred to ulfs_reclaim().
 *
 * => vp is locked on entry; it is unlocked before returning.
 * => *ap->a_recycle is set true when the vnode can be recycled at once
 *    (i.e. the inode is now free).
 */
int
ulfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *transmp;
	mode_t mode;
	int error = 0;

	transmp = vp->v_mount;
	/* Lazy fstrans: do not block suspend requests for this call. */
	fstrans_start(transmp, FSTRANS_LAZY);
	/*
	 * Ignore inodes related to stale file handles.
	 * (i_mode == 0 means the inode is already free.)
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef LFS_EXTATTR
		ulfs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			/* Release the file's data blocks. */
			error = lfs_truncate(vp, (off_t)0, 0, NOCRED);
		}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
		/* Return the inode's quota allocation; best effort. */
		(void)lfs_chkiq(ip, -1, NOCRED, 0);
#endif
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;		/* mark inode free */
		ip->i_omode = mode;	/* keep old mode for later use */
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		/*
		 * Defer final inode free and update to ulfs_reclaim().
		 */
	}

	if (ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		/* Flush pending timestamp/metadata updates. */
		lfs_update(vp, NULL, NULL, 0);
	}

out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);
	VOP_UNLOCK(vp);
	fstrans_done(transmp);
	return (error);
}
135 1.9.4.2 yamt
136 1.9.4.2 yamt /*
137 1.9.4.2 yamt * Reclaim an inode so that it can be used for other purposes.
138 1.9.4.2 yamt */
139 1.9.4.2 yamt int
140 1.9.4.2 yamt ulfs_reclaim(struct vnode *vp)
141 1.9.4.2 yamt {
142 1.9.4.2 yamt struct inode *ip = VTOI(vp);
143 1.9.4.2 yamt
144 1.9.4.2 yamt if (prtactive && vp->v_usecount > 1)
145 1.9.4.2 yamt vprint("ulfs_reclaim: pushing active", vp);
146 1.9.4.2 yamt
147 1.9.4.2 yamt /* XXX: do we really need two of these? */
148 1.9.4.2 yamt /* note: originally the first was inside a wapbl txn */
149 1.9.4.2 yamt lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
150 1.9.4.2 yamt lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
151 1.9.4.2 yamt
152 1.9.4.2 yamt /*
153 1.9.4.2 yamt * Remove the inode from its hash chain.
154 1.9.4.2 yamt */
155 1.9.4.2 yamt ulfs_ihashrem(ip);
156 1.9.4.2 yamt
157 1.9.4.2 yamt if (ip->i_devvp) {
158 1.9.4.2 yamt vrele(ip->i_devvp);
159 1.9.4.2 yamt ip->i_devvp = 0;
160 1.9.4.2 yamt }
161 1.9.4.2 yamt #if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
162 1.9.4.2 yamt ulfsquota_free(ip);
163 1.9.4.2 yamt #endif
164 1.9.4.2 yamt #ifdef LFS_DIRHASH
165 1.9.4.2 yamt if (ip->i_dirhash != NULL)
166 1.9.4.2 yamt ulfsdirhash_free(ip);
167 1.9.4.2 yamt #endif
168 1.9.4.2 yamt return (0);
169 1.9.4.2 yamt }
170 1.9.4.2 yamt
171 1.9.4.2 yamt /*
172 1.9.4.2 yamt * allocate a range of blocks in a file.
173 1.9.4.2 yamt * after this function returns, any page entirely contained within the range
174 1.9.4.2 yamt * will map to invalid data and thus must be overwritten before it is made
175 1.9.4.2 yamt * accessible to others.
176 1.9.4.2 yamt */
177 1.9.4.2 yamt
178 1.9.4.2 yamt int
179 1.9.4.2 yamt ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
180 1.9.4.2 yamt int flags)
181 1.9.4.2 yamt {
182 1.9.4.2 yamt off_t neweof; /* file size after the operation */
183 1.9.4.2 yamt off_t neweob; /* offset next to the last block after the operation */
184 1.9.4.2 yamt off_t pagestart; /* starting offset of range covered by pgs */
185 1.9.4.2 yamt off_t eob; /* offset next to allocated blocks */
186 1.9.4.2 yamt struct uvm_object *uobj;
187 1.9.4.2 yamt int i, delta, error, npages;
188 1.9.4.2 yamt int bshift = vp->v_mount->mnt_fs_bshift;
189 1.9.4.2 yamt int bsize = 1 << bshift;
190 1.9.4.2 yamt int ppb = MAX(bsize >> PAGE_SHIFT, 1);
191 1.9.4.2 yamt struct vm_page **pgs;
192 1.9.4.2 yamt size_t pgssize;
193 1.9.4.2 yamt UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
194 1.9.4.2 yamt UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
195 1.9.4.2 yamt vp, off, len, vp->v_size);
196 1.9.4.2 yamt
197 1.9.4.2 yamt neweof = MAX(vp->v_size, off + len);
198 1.9.4.2 yamt GOP_SIZE(vp, neweof, &neweob, 0);
199 1.9.4.2 yamt
200 1.9.4.2 yamt error = 0;
201 1.9.4.2 yamt uobj = &vp->v_uobj;
202 1.9.4.2 yamt
203 1.9.4.2 yamt /*
204 1.9.4.2 yamt * read or create pages covering the range of the allocation and
205 1.9.4.2 yamt * keep them locked until the new block is allocated, so there
206 1.9.4.2 yamt * will be no window where the old contents of the new block are
207 1.9.4.2 yamt * visible to racing threads.
208 1.9.4.2 yamt */
209 1.9.4.2 yamt
210 1.9.4.2 yamt pagestart = trunc_page(off) & ~(bsize - 1);
211 1.9.4.2 yamt npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
212 1.9.4.2 yamt pgssize = npages * sizeof(struct vm_page *);
213 1.9.4.2 yamt pgs = kmem_zalloc(pgssize, KM_SLEEP);
214 1.9.4.2 yamt
215 1.9.4.2 yamt /*
216 1.9.4.2 yamt * adjust off to be block-aligned.
217 1.9.4.2 yamt */
218 1.9.4.2 yamt
219 1.9.4.2 yamt delta = off & (bsize - 1);
220 1.9.4.2 yamt off -= delta;
221 1.9.4.2 yamt len += delta;
222 1.9.4.2 yamt
223 1.9.4.2 yamt genfs_node_wrlock(vp);
224 1.9.4.2 yamt mutex_enter(uobj->vmobjlock);
225 1.9.4.2 yamt error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
226 1.9.4.2 yamt VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
227 1.9.4.2 yamt PGO_NOTIMESTAMP | PGO_GLOCKHELD);
228 1.9.4.2 yamt if (error) {
229 1.9.4.2 yamt goto out;
230 1.9.4.2 yamt }
231 1.9.4.2 yamt
232 1.9.4.2 yamt /*
233 1.9.4.2 yamt * now allocate the range.
234 1.9.4.2 yamt */
235 1.9.4.2 yamt
236 1.9.4.2 yamt error = GOP_ALLOC(vp, off, len, flags, cred);
237 1.9.4.2 yamt genfs_node_unlock(vp);
238 1.9.4.2 yamt
239 1.9.4.2 yamt /*
240 1.9.4.2 yamt * if the allocation succeeded, clear PG_CLEAN on all the pages
241 1.9.4.2 yamt * and clear PG_RDONLY on any pages that are now fully backed
242 1.9.4.2 yamt * by disk blocks. if the allocation failed, we do not invalidate
243 1.9.4.2 yamt * the pages since they might have already existed and been dirty,
244 1.9.4.2 yamt * in which case we need to keep them around. if we created the pages,
245 1.9.4.2 yamt * they will be clean and read-only, and leaving such pages
246 1.9.4.2 yamt * in the cache won't cause any problems.
247 1.9.4.2 yamt */
248 1.9.4.2 yamt
249 1.9.4.2 yamt GOP_SIZE(vp, off + len, &eob, 0);
250 1.9.4.2 yamt mutex_enter(uobj->vmobjlock);
251 1.9.4.2 yamt mutex_enter(&uvm_pageqlock);
252 1.9.4.2 yamt for (i = 0; i < npages; i++) {
253 1.9.4.2 yamt KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
254 1.9.4.2 yamt if (!error) {
255 1.9.4.2 yamt if (off <= pagestart + (i << PAGE_SHIFT) &&
256 1.9.4.2 yamt pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
257 1.9.4.2 yamt pgs[i]->flags &= ~PG_RDONLY;
258 1.9.4.2 yamt }
259 1.9.4.2 yamt pgs[i]->flags &= ~PG_CLEAN;
260 1.9.4.2 yamt }
261 1.9.4.2 yamt uvm_pageactivate(pgs[i]);
262 1.9.4.2 yamt }
263 1.9.4.2 yamt mutex_exit(&uvm_pageqlock);
264 1.9.4.2 yamt uvm_page_unbusy(pgs, npages);
265 1.9.4.2 yamt mutex_exit(uobj->vmobjlock);
266 1.9.4.2 yamt
267 1.9.4.2 yamt out:
268 1.9.4.2 yamt kmem_free(pgs, pgssize);
269 1.9.4.2 yamt return error;
270 1.9.4.2 yamt }
271