/*	$NetBSD: ulfs_readwrite.c,v 1.7.4.2 2014/05/22 11:41:19 yamt Exp $	*/
/*  from NetBSD: ufs_readwrite.c,v 1.105 2013/01/22 09:39:18 dholland Exp  */

/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 */
34 1.7.4.2 yamt
35 1.7.4.2 yamt #include <sys/cdefs.h>
36 1.7.4.2 yamt __KERNEL_RCSID(1, "$NetBSD: ulfs_readwrite.c,v 1.7.4.2 2014/05/22 11:41:19 yamt Exp $");
37 1.7.4.2 yamt
38 1.7.4.2 yamt #ifdef LFS_READWRITE
39 1.7.4.2 yamt #define FS struct lfs
40 1.7.4.2 yamt #define I_FS i_lfs
41 1.7.4.2 yamt #define READ lfs_read
42 1.7.4.2 yamt #define READ_S "lfs_read"
43 1.7.4.2 yamt #define WRITE lfs_write
44 1.7.4.2 yamt #define WRITE_S "lfs_write"
45 1.7.4.2 yamt #define fs_bsize lfs_bsize
46 1.7.4.2 yamt #define fs_bmask lfs_bmask
47 1.7.4.2 yamt #else
48 1.7.4.2 yamt #define FS struct fs
49 1.7.4.2 yamt #define I_FS i_fs
50 1.7.4.2 yamt #define READ ffs_read
51 1.7.4.2 yamt #define READ_S "ffs_read"
52 1.7.4.2 yamt #define WRITE ffs_write
53 1.7.4.2 yamt #define WRITE_S "ffs_write"
54 1.7.4.2 yamt #endif
55 1.7.4.2 yamt
/*
 * Vnode op for reading.
 *
 * Regular files (except the LFS Ifile) are read through the page cache
 * with ubc_uiomove(); directories, symlinks and the Ifile fall back to
 * the traditional buffer-cache bread()/breadn() loop below.
 */
/* ARGSUSED */
int
READ(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct buf *bp;
	FS *fs;
	vsize_t bytelen;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, ioflag;
	bool usepc = false;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->I_FS;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ_S);

	if (vp->v_type == VLNK) {
		/*
		 * Short symlinks live in the inode itself and must not be
		 * read through this path.
		 */
		if (ip->i_size < fs->um_maxsymlinklen ||
		    (fs->um_maxsymlinklen == 0 && DIP(ip, blocks) == 0))
			panic("%s: short symlink", READ_S);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", READ_S, vp->v_type);
#endif
	if ((u_int64_t)uio->uio_offset > fs->um_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

#ifndef LFS_READWRITE
	/* Reads from a (valid) persistent snapshot use the FFS helper. */
	if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT)
		return ffs_snapshot_read(vp, uio, ioflag);
#endif /* !LFS_READWRITE */

	fstrans_start(vp->v_mount, FSTRANS_SHARED);

	/* Reading at or past EOF transfers nothing. */
	if (uio->uio_offset >= ip->i_size)
		goto out;

#ifdef LFS_READWRITE
	/* The Ifile is never read through the page cache. */
	usepc = (vp->v_type == VREG && ip->i_number != LFS_IFILE_INUM);
#else /* !LFS_READWRITE */
	usepc = vp->v_type == VREG;
#endif /* !LFS_READWRITE */
	if (usepc) {
		/* Page-cache path. */
		const int advice = IO_ADV_DECODE(ap->a_ioflag);

		while (uio->uio_resid > 0) {
			if (ioflag & IO_DIRECT) {
				genfs_directio(vp, uio, ioflag);
			}
			/* Never read past EOF. */
			bytelen = MIN(ip->i_size - uio->uio_offset,
			    uio->uio_resid);
			if (bytelen == 0)
				break;
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
			    UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
			if (error)
				break;
		}
		goto out;
	}

	/* Buffer-cache path: one filesystem block per iteration. */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		bytesinfile = ip->i_size - uio->uio_offset;
		if (bytesinfile <= 0)
			break;
		lbn = lfs_lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = lfs_blksize(fs, ip, lbn);
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(MIN(fs->fs_bsize - blkoffset, uio->uio_resid),
		    bytesinfile);

		/* Read ahead the next block unless this one is the last. */
		if (lfs_lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, NOCRED, 0, &bp);
		else {
			int nextsize = lfs_blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, 0, &bp);
		}
		if (error)
			break;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
		if (error)
			break;
		brelse(bp, 0);
	}
	/* Release the buffer still held after an error/short-read break. */
	if (bp != NULL)
		brelse(bp, 0);

 out:
	/* Mark the access time (honoring noatime), syncing if IO_SYNC. */
	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
		ip->i_flag |= IN_ACCESS;
		if ((ap->a_ioflag & IO_SYNC) == IO_SYNC) {
			error = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
		}
	}

	fstrans_done(vp->v_mount);
	return (error);
}
191 1.7.4.2 yamt
/*
 * Vnode op for writing.
 *
 * Regular files are written through the page cache (ubc_uiomove), with
 * blocks allocated up front; directories and symlinks go through the
 * buffer cache after flushing any cached pages of the affected range.
 * On error the file is truncated back to its original size and the uio
 * is rewound so the caller sees an all-or-nothing transfer.
 */
int
WRITE(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	struct buf *bp;
	kauth_cred_t cred;
	daddr_t lbn;
	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;
	int aflag;
	int extended=0;	/* set when the write grew the file (for NOTE_EXTEND) */
	vsize_t bytelen;
	bool async;
	bool usepc = false;
#ifdef LFS_READWRITE
	bool need_unreserve = false;	/* an lfs_reserve() is outstanding */
#endif

	cred = ap->a_cred;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

	KASSERT(vp->v_size == ip->i_size);
#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("%s: mode", WRITE_S);
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		/* Append-only files may only be written at EOF. */
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		if ((ioflag & IO_SYNC) == 0)
			panic("%s: nonsync dir write", WRITE_S);
		break;
	default:
		panic("%s: type", WRITE_S);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->um_maxfilesize)
		return (EFBIG);
#ifdef LFS_READWRITE
	/* Disallow writes to the Ifile, even if noschg flag is removed */
	/* XXX can this go away when the Ifile is no longer in the namespace? */
	if (vp == fs->lfs_ivnode)
		return (EPERM);
#endif
	if (uio->uio_resid == 0)
		return (0);

	fstrans_start(vp->v_mount, FSTRANS_SHARED);

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	async = vp->v_mount->mnt_flag & MNT_ASYNC;
	/* Remember starting offset/resid/size for rollback and knotes. */
	origoff = uio->uio_offset;
	resid = uio->uio_resid;
	osize = ip->i_size;
	error = 0;

	usepc = vp->v_type == VREG;

#ifdef LFS_READWRITE
	/* LFS writes are always effectively async; wait for segment space. */
	async = true;
	lfs_availwait(fs, lfs_btofsb(fs, uio->uio_resid));
	lfs_check(vp, LFS_UNUSED_LBN, 0);
#endif /* !LFS_READWRITE */
	if (!usepc)
		goto bcache;

	/*
	 * Page-cache path.  [preallocoff, endallocoff) is the region where
	 * whole blocks will be completely overwritten, so their backing
	 * store can be allocated without first initializing the pages.
	 */
	preallocoff = round_page(lfs_blkroundup(fs, MAX(osize, uio->uio_offset)));
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
	endallocoff = nsize - lfs_blkoff(fs, nsize);

	/*
	 * if we're increasing the file size, deal with expanding
	 * the fragment if there is one.
	 */

	if (nsize > osize && lfs_lblkno(fs, osize) < ULFS_NDADDR &&
	    lfs_lblkno(fs, osize) != lfs_lblkno(fs, nsize) &&
	    lfs_blkroundup(fs, osize) != osize) {
		off_t eob;

		eob = lfs_blkroundup(fs, osize);
		uvm_vnp_setwritesize(vp, eob);
		error = ulfs_balloc_range(vp, osize, eob - osize, cred, aflag);
		if (error)
			goto out;
		if (flags & B_SYNC) {
			mutex_enter(vp->v_interlock);
			VOP_PUTPAGES(vp, trunc_page(osize & fs->fs_bmask),
			    round_page(eob),
			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
		}
	}

	while (uio->uio_resid > 0) {
		int ubc_flags = UBC_WRITE;
		bool overwrite; /* if we're overwrite a whole block */
		off_t newoff;

		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
		}

		oldoff = uio->uio_offset;
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		bytelen = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
		if (bytelen == 0) {
			break;
		}

		/*
		 * if we're filling in a hole, allocate the blocks now and
		 * initialize the pages first. if we're extending the file,
		 * we can safely allocate blocks without initializing pages
		 * since the new blocks will be inaccessible until the write
		 * is complete.
		 */
		overwrite = uio->uio_offset >= preallocoff &&
		    uio->uio_offset < endallocoff;
		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
		    lfs_blkoff(fs, uio->uio_offset) == 0 &&
		    (uio->uio_offset & PAGE_MASK) == 0) {
			vsize_t len;

			/* Trim to whole pages and whole fs blocks. */
			len = trunc_page(bytelen);
			len -= lfs_blkoff(fs, len);
			if (len > 0) {
				overwrite = true;
				bytelen = len;
			}
		}

		newoff = oldoff + bytelen;
		if (vp->v_size < newoff) {
			uvm_vnp_setwritesize(vp, newoff);
		}

		if (!overwrite) {
			error = ulfs_balloc_range(vp, uio->uio_offset, bytelen,
			    cred, aflag);
			if (error)
				break;
		} else {
			/*
			 * Whole-block overwrite: allocate backing store only;
			 * UBC_FAULTBUSY avoids faulting in soon-overwritten
			 * page contents.
			 */
			genfs_node_wrlock(vp);
			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
			    aflag, cred);
			genfs_node_unlock(vp);
			if (error)
				break;
			ubc_flags |= UBC_FAULTBUSY;
		}

		/*
		 * copy the data.
		 */

		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    IO_ADV_DECODE(ioflag), ubc_flags | UBC_UNMAP_FLAG(vp));

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 *
		 * we should update the size even when uiomove failed.
		 */

		if (vp->v_size < newoff) {
			uvm_vnp_setsize(vp, newoff);
			extended = 1;
		}

		if (error)
			break;

		/*
		 * flush what we just wrote if necessary.
		 * XXXUBC simplistic async flushing.
		 */

#ifndef LFS_READWRITE
		if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
			    (uio->uio_offset >> 16) << 16,
			    PGO_CLEANIT | PGO_JOURNALLOCKED | PGO_LAZY);
			if (error)
				break;
		}
#else
		__USE(async);
#endif
	}
	if (error == 0 && ioflag & IO_SYNC) {
		/* Synchronous write: push the whole written range to disk. */
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, trunc_page(origoff & fs->fs_bmask),
		    round_page(lfs_blkroundup(fs, uio->uio_offset)),
		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
	}
	goto out;

 bcache:
	/*
	 * Buffer-cache path (directories, symlinks).  Flush and free any
	 * cached pages over the range first so the two caches don't alias.
	 */
	mutex_enter(vp->v_interlock);
	VOP_PUTPAGES(vp, trunc_page(origoff), round_page(origoff + resid),
	    PGO_CLEANIT | PGO_FREE | PGO_SYNCIO | PGO_JOURNALLOCKED);
	while (uio->uio_resid > 0) {
		lbn = lfs_lblkno(fs, uio->uio_offset);
		blkoffset = lfs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
		/* Partial-block writes must start from a zeroed buffer. */
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

#ifdef LFS_READWRITE
		/* Reserve worst-case metadata space before allocating. */
		error = lfs_reserve(fs, vp, NULL,
		    lfs_btofsb(fs, (ULFS_NIADDR + 1) << fs->lfs_bshift));
		if (error)
			break;
		need_unreserve = true;
#endif
		error = lfs_balloc(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);

		if (error)
			break;
		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_ASSIGN(ip, size, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			extended = 1;
		}
		/* Clamp the transfer if the buffer came up short. */
		size = lfs_blksize(fs, ip, lbn) - bp->b_resid;
		if (xfersize > size)
			xfersize = size;

		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);

		/*
		 * if we didn't clear the block and the uiomove failed,
		 * the buf will now contain part of some other file,
		 * so we need to invalidate it.
		 */
		if (error && (flags & B_CLRBUF) == 0) {
			brelse(bp, BC_INVAL);
			break;
		}
#ifdef LFS_READWRITE
		/* Write through LFS, then drop the reservation taken above. */
		(void)VOP_BWRITE(bp->b_vp, bp);
		lfs_reserve(fs, vp, NULL,
		    -lfs_btofsb(fs, (ULFS_NIADDR + 1) << fs->lfs_bshift));
		need_unreserve = false;
#else
		if (ioflag & IO_SYNC)
			(void)bwrite(bp);
		else if (xfersize + blkoffset == fs->fs_bsize)
			bawrite(bp);
		else
			bdwrite(bp);
#endif
		if (error || xfersize == 0)
			break;
	}
#ifdef LFS_READWRITE
	/* Balance any reservation left dangling by an error break. */
	if (need_unreserve) {
		lfs_reserve(fs, vp, NULL,
		    -lfs_btofsb(fs, (ULFS_NIADDR + 1) << fs->lfs_bshift));
	}
#endif

	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
 out:
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (vp->v_mount->mnt_flag & MNT_RELATIME)
		ip->i_flag |= IN_ACCESS;
	if (resid > uio->uio_resid && ap->a_cred) {
		if (ip->i_mode & ISUID) {
			if (kauth_authorize_vnode(ap->a_cred,
			    KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0) {
				ip->i_mode &= ~ISUID;
				DIP_ASSIGN(ip, mode, ip->i_mode);
			}
		}

		if (ip->i_mode & ISGID) {
			if (kauth_authorize_vnode(ap->a_cred,
			    KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0) {
				ip->i_mode &= ~ISGID;
				DIP_ASSIGN(ip, mode, ip->i_mode);
			}
		}
	}
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		/* Undo the partial write: restore size, rewind the uio. */
		(void) lfs_truncate(vp, osize, ioflag & IO_SYNC, ap->a_cred);
		uio->uio_offset -= resid - uio->uio_resid;
		uio->uio_resid = resid;
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC) {
		/* Synchronous write of data succeeded: sync the inode too. */
		error = lfs_update(vp, NULL, NULL, UPDATE_WAIT);
	} else {
		/* nothing */
	}
	KASSERT(vp->v_size == ip->i_size);
	fstrans_done(vp->v_mount);

	return (error);
}
528