/*	$NetBSD: ulfs_readwrite.c,v 1.28 2021/10/20 03:08:19 thorpej Exp $	*/
/*  from NetBSD: ufs_readwrite.c,v 1.120 2015/04/12 22:48:38 riastradh Exp  */

/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_readwrite.c,v 1.28 2021/10/20 03:08:19 thorpej Exp $");

#define	FS			struct lfs
#define	I_FS			i_lfs
#define	READ			lfs_read
#define	READ_S			"lfs_read"
#define	WRITE			lfs_write
#define	WRITE_S			"lfs_write"
#define	BUFRD			lfs_bufrd
#define	BUFWR			lfs_bufwr
#define	fs_sb_getbsize(fs)	lfs_sb_getbsize(fs)
#define	fs_bmask		lfs_bmask
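
/*
 * Note: this file is a parameterized version of the generic
 * ufs_readwrite.c (see the "from" tag above); the macros above bind
 * the shared template to its LFS-specific types and functions.
 */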

static int	ulfs_post_read_update(struct vnode *, int, int);
static int	ulfs_post_write_update(struct vnode *, struct uio *, int,
		    kauth_cred_t, off_t, int, int);

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
int
READ(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	vsize_t bytelen;
	int error, ioflag, advice;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->I_FS;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	error = 0;

	KASSERT(uio->uio_rw == UIO_READ);
	KASSERT(vp->v_type == VREG || vp->v_type == VDIR);

	/* XXX Eliminate me by refusing directory reads from userland.  */
	if (vp->v_type == VDIR)
		return BUFRD(vp, uio, ioflag, ap->a_cred);
	/* XXX Eliminate me by using ufs_bufio in lfs.  */
	if (vp->v_type == VREG && ip->i_number == LFS_IFILE_INUM)
		return BUFRD(vp, uio, ioflag, ap->a_cred);
	if ((u_int64_t)uio->uio_offset > fs->um_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (uio->uio_offset >= ip->i_size)
		goto out;

	KASSERT(vp->v_type == VREG);
	advice = IO_ADV_DECODE(ap->a_ioflag);
	while (uio->uio_resid > 0) {
		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag);
		}
		bytelen = MIN(ip->i_size - uio->uio_offset, uio->uio_resid);
		if (bytelen == 0)
			break;
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
		if (error)
			break;
	}

 out:
	error = ulfs_post_read_update(vp, ap->a_ioflag, error);
	return (error);
}

/*
 * UFS op for reading via the buffer cache
 */
int
BUFRD(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
{
	struct inode *ip;
	FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error;

	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK ||
	    vp->v_type == VREG);
	KASSERT(uio->uio_rw == UIO_READ);

	ip = VTOI(vp);
	fs = ip->I_FS;
	error = 0;

	KASSERT(vp->v_type != VLNK || ip->i_size >= fs->um_maxsymlinklen);
	KASSERT(vp->v_type != VLNK || fs->um_maxsymlinklen != 0 ||
	    DIP(ip, blocks) == 0);
	KASSERT(vp->v_type != VREG || vp == fs->lfs_ivnode);
	KASSERT(vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM);
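
	/*
	 * The assertions above document BUFRD's expected callers:
	 * directory reads, reads of long symlinks (short symlinks are
	 * stored in the inode itself and are not expected to reach
	 * here), and reads of the LFS Ifile.
	 */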
	if (uio->uio_offset > fs->um_maxfilesize)
		return EFBIG;
	if (uio->uio_resid == 0)
		return 0;

	if (uio->uio_offset >= ip->i_size)
		goto out;

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		bytesinfile = ip->i_size - uio->uio_offset;
		if (bytesinfile <= 0)
			break;
		lbn = lfs_lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = lfs_blksize(fs, ip, lbn);
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(MIN(fs_sb_getbsize(fs) - blkoffset, uio->uio_resid),
		    bytesinfile);

		if (lfs_lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, 0, &bp);
		else {
			int nextsize = lfs_blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, 0, &bp);
		}
		if (error)
			break;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
		if (error)
			break;
		brelse(bp, 0);
	}
	if (bp != NULL)
		brelse(bp, 0);

 out:
	error = ulfs_post_read_update(vp, ioflag, error);
	return (error);
}

static int
ulfs_post_read_update(struct vnode *vp, int ioflag, int oerror)
{
	struct inode *ip = VTOI(vp);
	int error = oerror;

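	/*
	 * Record the access for a deferred atime update unless atime
	 * maintenance is disabled entirely; a synchronous read pushes
	 * the inode update out to disk immediately.
	 */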
	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
		ip->i_state |= IN_ACCESS;
		if ((ioflag & IO_SYNC) == IO_SYNC) {
			error = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
		}
	}

	/* Read error overrides any inode update error.  */
	if (oerror)
		error = oerror;
	return error;
}

/*
 * Vnode op for writing.
 */
int
WRITE(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	kauth_cred_t cred;
	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
	int blkoffset, error, flags, ioflag, resid;
	int aflag;
	vsize_t bytelen;
	bool async;

	cred = ap->a_cred;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

	KASSERT(vp->v_size == ip->i_size);
	KASSERT(uio->uio_rw == UIO_WRITE);
	KASSERT(vp->v_type == VREG);

	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->i_size;
	if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
		return (EPERM);

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->um_maxfilesize)
		return (EFBIG);
	/* Disallow writes to the Ifile, even if noschg flag is removed */
	/* XXX can this go away when the Ifile is no longer in the namespace? */
	if (vp == fs->lfs_ivnode)
		return (EPERM);
	if (uio->uio_resid == 0)
		return (0);

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	async = vp->v_mount->mnt_flag & MNT_ASYNC;
	origoff = uio->uio_offset;
	resid = uio->uio_resid;
	osize = ip->i_size;
	error = 0;

	KASSERT(vp->v_type == VREG);

	async = true;
	lfs_availwait(fs, lfs_btofsb(fs, uio->uio_resid));
	lfs_check(vp, LFS_UNUSED_LBN, 0);

	preallocoff = round_page(lfs_blkroundup(fs, MAX(osize, uio->uio_offset)));
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
	endallocoff = nsize - lfs_blkoff(fs, nsize);

	/*
	 * if we're increasing the file size, deal with expanding
	 * the fragment if there is one.
	 */
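	/*
	 * (The old last block of a short file may be a fragment; before
	 * the file can grow past it, ulfs_balloc_range() below extends
	 * the range from the old EOF to the rounded-up block boundary,
	 * and a synchronous write flushes the affected pages right away,
	 * presumably so no stale data is left visible.)
	 */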

	if (nsize > osize && lfs_lblkno(fs, osize) < ULFS_NDADDR &&
	    lfs_lblkno(fs, osize) != lfs_lblkno(fs, nsize) &&
	    lfs_blkroundup(fs, osize) != osize) {
		off_t eob;

		eob = lfs_blkroundup(fs, osize);
		uvm_vnp_setwritesize(vp, eob);
		error = ulfs_balloc_range(vp, osize, eob - osize, cred, aflag);
		if (error)
			goto out;
		if (flags & B_SYNC) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			VOP_PUTPAGES(vp, trunc_page(osize & lfs_sb_getbmask(fs)),
			    round_page(eob),
			    PGO_CLEANIT | PGO_SYNCIO);
		}
	}

	while (uio->uio_resid > 0) {
		int ubc_flags = UBC_WRITE;
		bool overwrite; /* if we're overwriting a whole block */
		off_t newoff;

		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag);
		}

		oldoff = uio->uio_offset;
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		bytelen = MIN(fs_sb_getbsize(fs) - blkoffset, uio->uio_resid);
		if (bytelen == 0) {
			break;
		}

		/*
		 * if we're filling in a hole, allocate the blocks now and
		 * initialize the pages first.  if we're extending the file,
		 * we can safely allocate blocks without initializing pages
		 * since the new blocks will be inaccessible until the write
		 * is complete.
		 */
		overwrite = uio->uio_offset >= preallocoff &&
		    uio->uio_offset < endallocoff;
		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
		    lfs_blkoff(fs, uio->uio_offset) == 0 &&
		    (uio->uio_offset & PAGE_MASK) == 0) {
			vsize_t len;

			len = trunc_page(bytelen);
			len -= lfs_blkoff(fs, len);
			if (len > 0) {
				overwrite = true;
				bytelen = len;
			}
		}

		newoff = oldoff + bytelen;
		if (vp->v_size < newoff) {
			uvm_vnp_setwritesize(vp, newoff);
		}

		if (!overwrite) {
			error = ulfs_balloc_range(vp, uio->uio_offset, bytelen,
			    cred, aflag);
			if (error)
				break;
		} else {
			genfs_node_wrlock(vp);
			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
			    aflag, cred);
			genfs_node_unlock(vp);
			if (error)
				break;
			ubc_flags |= UBC_FAULTBUSY;
		}

		/*
		 * copy the data.
		 */

		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    IO_ADV_DECODE(ioflag), ubc_flags | UBC_VNODE_FLAGS(vp));

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 *
		 * we should update the size even when uiomove failed.
		 */

		if (vp->v_size < newoff) {
			uvm_vnp_setsize(vp, newoff);
		}

		if (error)
			break;

		/*
		 * flush what we just wrote if necessary.
		 * XXXUBC simplistic async flushing.
		 */

		__USE(async);
	}
	if (error == 0 && ioflag & IO_SYNC) {
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		error = VOP_PUTPAGES(vp, trunc_page(origoff & lfs_sb_getbmask(fs)),
		    round_page(lfs_blkroundup(fs, uio->uio_offset)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}

 out:
	error = ulfs_post_write_update(vp, uio, ioflag, cred, osize, resid,
	    error);

	return (error);
}

/*
 * UFS op for writing via the buffer cache
 */
int
BUFWR(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
{
	struct inode *ip;
	FS *fs;
	int flags;
	struct buf *bp;
	off_t osize;
	int resid, xfersize, size, blkoffset;
	daddr_t lbn;
	int error;
	bool need_unreserve = false;

	KASSERT(ISSET(ioflag, IO_NODELOCKED));
	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
	KASSERT(vp->v_type != VDIR || ISSET(ioflag, IO_SYNC));
	KASSERT(uio->uio_rw == UIO_WRITE);

	ip = VTOI(vp);
	fs = ip->I_FS;

	KASSERT(vp->v_size == ip->i_size);

	if (uio->uio_offset < 0 ||
	    uio->uio_resid > fs->um_maxfilesize ||
	    uio->uio_offset > (fs->um_maxfilesize - uio->uio_resid))
		return EFBIG;
	KASSERT(vp != fs->lfs_ivnode);
	if (uio->uio_resid == 0)
		return 0;

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	resid = uio->uio_resid;
	osize = ip->i_size;
	error = 0;

	KASSERT(vp->v_type != VREG);

	lfs_availwait(fs, lfs_btofsb(fs, uio->uio_resid));
	lfs_check(vp, LFS_UNUSED_LBN, 0);
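
	/*
	 * Copy in the data a block at a time: reserve segment space,
	 * fetch or allocate the block with lfs_balloc(), uiomove() the
	 * user data into it, and write it out with VOP_BWRITE(),
	 * invalidating the buffer if a failed copy could have left
	 * stale contents visible.
	 */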
	/* XXX Should never have pages cached here.  */
	KASSERT(vp->v_uobj.uo_npages == 0);
	while (uio->uio_resid > 0) {
		lbn = lfs_lblkno(fs, uio->uio_offset);
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(fs_sb_getbsize(fs) - blkoffset, uio->uio_resid);
		if (fs_sb_getbsize(fs) > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		error = lfs_reserve(fs, vp, NULL,
		    lfs_btofsb(fs, (ULFS_NIADDR + 1) << lfs_sb_getbshift(fs)));
		if (error)
			break;
		need_unreserve = true;
		error = lfs_balloc(vp, uio->uio_offset, xfersize, cred, flags,
		    &bp);

		if (error)
			break;
		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_ASSIGN(ip, size, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
		}
		size = lfs_blksize(fs, ip, lbn) - bp->b_resid;
		if (xfersize > size)
			xfersize = size;

		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);

		/*
		 * if we didn't clear the block and the uiomove failed,
		 * the buf will now contain part of some other file,
		 * so we need to invalidate it.
		 */
		if (error && (flags & B_CLRBUF) == 0) {
			brelse(bp, BC_INVAL);
			break;
		}
		(void)VOP_BWRITE(bp->b_vp, bp);
		lfs_reserve(fs, vp, NULL,
		    -lfs_btofsb(fs, (ULFS_NIADDR + 1) << lfs_sb_getbshift(fs)));
		need_unreserve = false;
		if (error || xfersize == 0)
			break;
	}
	if (need_unreserve) {
		lfs_reserve(fs, vp, NULL,
		    -lfs_btofsb(fs, (ULFS_NIADDR + 1) << lfs_sb_getbshift(fs)));
	}

	error = ulfs_post_write_update(vp, uio, ioflag, cred, osize, resid,
	    error);

	return (error);
}

static int
ulfs_post_write_update(struct vnode *vp, struct uio *uio, int ioflag,
    kauth_cred_t cred, off_t osize, int resid, int oerror)
{
	struct inode *ip = VTOI(vp);
	int error = oerror;

	/* Trigger ctime and mtime updates, and atime if MNT_RELATIME.  */
	ip->i_state |= IN_CHANGE | IN_UPDATE;
	if (vp->v_mount->mnt_flag & MNT_RELATIME)
		ip->i_state |= IN_ACCESS;

	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
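	/*
	 * (kauth_authorize_vnode() is passed EPERM as its fallback
	 * result, so the bits are cleared unless the security policy
	 * explicitly allows this credential to retain them.)
	 */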
532 1.1 dholland */ 533 1.8 riastrad if (resid > uio->uio_resid && cred) { 534 1.1 dholland if (ip->i_mode & ISUID) { 535 1.8 riastrad if (kauth_authorize_vnode(cred, 536 1.1 dholland KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0) { 537 1.1 dholland ip->i_mode &= ~ISUID; 538 1.1 dholland DIP_ASSIGN(ip, mode, ip->i_mode); 539 1.1 dholland } 540 1.1 dholland } 541 1.1 dholland 542 1.1 dholland if (ip->i_mode & ISGID) { 543 1.8 riastrad if (kauth_authorize_vnode(cred, 544 1.1 dholland KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0) { 545 1.1 dholland ip->i_mode &= ~ISGID; 546 1.1 dholland DIP_ASSIGN(ip, mode, ip->i_mode); 547 1.1 dholland } 548 1.1 dholland } 549 1.1 dholland } 550 1.11 riastrad 551 1.11 riastrad /* 552 1.11 riastrad * Update the size on disk: truncate back to original size on 553 1.11 riastrad * error, or reflect the new size on success. 554 1.11 riastrad */ 555 1.1 dholland if (error) { 556 1.8 riastrad (void) lfs_truncate(vp, osize, ioflag & IO_SYNC, cred); 557 1.1 dholland uio->uio_offset -= resid - uio->uio_resid; 558 1.1 dholland uio->uio_resid = resid; 559 1.3 dholland } else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC) { 560 1.5 dholland error = lfs_update(vp, NULL, NULL, UPDATE_WAIT); 561 1.3 dholland } else { 562 1.3 dholland /* nothing */ 563 1.3 dholland } 564 1.12 riastrad 565 1.12 riastrad /* Make sure the vnode uvm size matches the inode file size. */ 566 1.1 dholland KASSERT(vp->v_size == ip->i_size); 567 1.1 dholland 568 1.14 riastrad /* Write error overrides any inode update error. */ 569 1.14 riastrad if (oerror) 570 1.14 riastrad error = oerror; 571 1.11 riastrad return error; 572 1.1 dholland } 573