1 1.26 riastrad /* $NetBSD: ulfs_inode.c,v 1.26 2020/09/05 16:30:13 riastradh Exp $ */ 2 1.15 dholland /* from NetBSD: ufs_inode.c,v 1.95 2015/06/13 14:56:45 hannken Exp */ 3 1.1 dholland 4 1.1 dholland /* 5 1.1 dholland * Copyright (c) 1991, 1993 6 1.1 dholland * The Regents of the University of California. All rights reserved. 7 1.1 dholland * (c) UNIX System Laboratories, Inc. 8 1.1 dholland * All or some portions of this file are derived from material licensed 9 1.1 dholland * to the University of California by American Telephone and Telegraph 10 1.1 dholland * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 1.1 dholland * the permission of UNIX System Laboratories, Inc. 12 1.1 dholland * 13 1.1 dholland * Redistribution and use in source and binary forms, with or without 14 1.1 dholland * modification, are permitted provided that the following conditions 15 1.1 dholland * are met: 16 1.1 dholland * 1. Redistributions of source code must retain the above copyright 17 1.1 dholland * notice, this list of conditions and the following disclaimer. 18 1.1 dholland * 2. Redistributions in binary form must reproduce the above copyright 19 1.1 dholland * notice, this list of conditions and the following disclaimer in the 20 1.1 dholland * documentation and/or other materials provided with the distribution. 21 1.1 dholland * 3. Neither the name of the University nor the names of its contributors 22 1.1 dholland * may be used to endorse or promote products derived from this software 23 1.1 dholland * without specific prior written permission. 24 1.1 dholland * 25 1.1 dholland * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 1.1 dholland * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 1.1 dholland * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 1.1 dholland * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 1.1 dholland * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 1.1 dholland * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 1.1 dholland * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 1.1 dholland * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 1.1 dholland * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 1.1 dholland * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 1.1 dholland * SUCH DAMAGE. 36 1.1 dholland * 37 1.1 dholland * @(#)ufs_inode.c 8.9 (Berkeley) 5/14/95 38 1.1 dholland */ 39 1.1 dholland 40 1.1 dholland #include <sys/cdefs.h> 41 1.26 riastrad __KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.26 2020/09/05 16:30:13 riastradh Exp $"); 42 1.1 dholland 43 1.1 dholland #if defined(_KERNEL_OPT) 44 1.3 dholland #include "opt_lfs.h" 45 1.1 dholland #include "opt_quota.h" 46 1.26 riastrad #include "opt_uvmhist.h" 47 1.1 dholland #endif 48 1.1 dholland 49 1.1 dholland #include <sys/param.h> 50 1.1 dholland #include <sys/systm.h> 51 1.1 dholland #include <sys/proc.h> 52 1.1 dholland #include <sys/vnode.h> 53 1.1 dholland #include <sys/mount.h> 54 1.1 dholland #include <sys/kernel.h> 55 1.1 dholland #include <sys/namei.h> 56 1.1 dholland #include <sys/kauth.h> 57 1.1 dholland #include <sys/kmem.h> 58 1.1 dholland 59 1.11 dholland #include <ufs/lfs/lfs.h> 60 1.11 dholland #include <ufs/lfs/lfs_accessors.h> 61 1.8 dholland #include <ufs/lfs/lfs_extern.h> 62 1.8 dholland 63 1.2 dholland #include <ufs/lfs/ulfs_inode.h> 64 1.2 dholland #include <ufs/lfs/ulfsmount.h> 65 1.2 dholland #include <ufs/lfs/ulfs_extern.h> 66 1.3 dholland #ifdef LFS_DIRHASH 67 1.2 dholland #include <ufs/lfs/ulfs_dirhash.h> 68 1.1 dholland #endif 69 1.3 dholland #ifdef LFS_EXTATTR 70 1.2 dholland #include <ufs/lfs/ulfs_extattr.h> 71 1.1 dholland #endif 72 1.1 dholland 
#ifdef UVMHIST
#include <uvm/uvm.h>
#endif
#include <uvm/uvm_page.h>
#include <uvm/uvm_stat.h>

/*
 * Last reference to an inode. If necessary, write or delete it.
 *
 * ulfs_inactive is the VOP_INACTIVE(9) implementation for ULFS vnodes
 * (vop_inactive_v2 variant).  If the link count has dropped to zero on a
 * writable filesystem, the file's storage is truncated away and its quota
 * charge released; the in-core and on-disk modes are then zeroed so the
 * vnode can be recycled.  The final on-disk inode free is deferred to
 * ulfs_reclaim().
 *
 * On return, *ap->a_recycle tells the caller whether the vnode should be
 * recycled immediately (true exactly when the inode has been freed,
 * i.e. i_mode == 0).
 */
int
ulfs_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		struct bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	mode_t mode;
	int error = 0;

	/*
	 * Ignore inodes related to stale file handles.
	 * (i_mode == 0 means the inode is not in use.)
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		/* Last link is gone and we can write: dispose of the file. */
#ifdef LFS_EXTATTR
		ulfs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			/* Drop all data blocks before freeing the inode. */
			error = lfs_truncate(vp, (off_t)0, 0, NOCRED);
		}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
		/* Return the inode to the owner's quota; errors ignored. */
		(void)lfs_chkiq(ip, -1, NOCRED, 0);
#endif
		DIP_ASSIGN(ip, rdev, 0);
		/*
		 * Clear the in-core mode (marking the inode free) but keep
		 * a copy in i_omode; the on-disk mode is cleared as well.
		 */
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_omode = mode;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_state |= IN_CHANGE | IN_UPDATE;
		/*
		 * Defer final inode free and update to ulfs_reclaim().
		 */
	}

	/* Push any pending timestamp/metadata updates to the inode. */
	if (ip->i_state & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		lfs_update(vp, NULL, NULL, 0);
	}

out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);

	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 *
 * VOP_RECLAIM(9) helper: flush the inode to stable storage, then release
 * the resources still attached to it — the reference on the device vnode,
 * any quota structures, and the directory hash (if configured).
 */
int
ulfs_reclaim(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	/* XXX: do we really need two of these? */
	/* note: originally the first was inside a wapbl txn */
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);

	/* Drop our reference on the underlying device vnode. */
	if (ip->i_devvp) {
		vrele(ip->i_devvp);
		ip->i_devvp = 0;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	ulfsquota_free(ip);
#endif
#ifdef LFS_DIRHASH
	if (ip->i_dirhash != NULL)
		ulfsdirhash_free(ip);
#endif
	return (0);
}

/*
 * allocate a range of blocks in a file.
 * after this function returns, any page entirely contained within the range
 * will map to invalid data and thus must be overwritten before it is made
 * accessible to others.
 */

/*
 * ulfs_balloc_range(vp, off, len, cred, flags):
 *	vp	vnode of the file to allocate in
 *	off/len	byte range to back with disk blocks
 *	cred	credentials, passed through to GOP_ALLOC
 *	flags	allocation flags, passed through to GOP_ALLOC
 * Returns 0 on success or an errno from VOP_GETPAGES/GOP_ALLOC.
 */
int
ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);	/* pages per fs block, >= 1 */
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx len 0x%jx u_size 0x%jx",
	    (uintptr_t)vp, off, len, vp->v_size);

	/* Compute the resulting EOF and round it out to a block boundary. */
	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	/* Block-aligned page offset of the range; at most one block's pages. */
	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	/*
	 * Take the genfs node lock (PGO_GLOCKHELD tells getpages we hold it)
	 * and the object lock, then busy the pages covering the range.
	 */
	genfs_node_wrlock(vp);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP | PGO_GLOCKHELD);
	if (error) {
		/*
		 * No rw_exit here: VOP_GETPAGES returns with vmobjlock
		 * released (it is re-acquired below on the success path),
		 * so only the genfs node lock needs dropping.
		 */
		genfs_node_unlock(vp);
		goto out;
	}

	/*
	 * now allocate the range.
	 */

	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * if the allocation succeeded, mark all pages dirty and clear
	 * PG_RDONLY on any pages that are now fully backed by disk blocks.
	 * if the allocation failed, we do not invalidate the pages since
	 * they might have already existed and been dirty, in which case we
	 * need to keep them around. if we created the pages, they will be
	 * clean and read-only, and leaving such pages in the cache won't
	 * cause any problems.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		if (!error) {
			/* Page entirely inside the allocated range? */
			if (off <= pagestart + (i << PAGE_SHIFT) &&
			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
				pgs[i]->flags &= ~PG_RDONLY;
			}
			uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
		}
		uvm_pagelock(pgs[i]);
		uvm_pageactivate(pgs[i]);
		uvm_pageunlock(pgs[i]);
	}
	/* Unbusy the pages we held across the allocation and drop the lock. */
	uvm_page_unbusy(pgs, npages);
	rw_exit(uobj->vmobjlock);

out:
	kmem_free(pgs, pgssize);
	return error;
}