/*	$NetBSD: lfs_syscalls.c,v 1.62 2001/11/23 21:44:28 chs Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.62 2001/11/23 21:44:28 chs Exp $");

#define LFS		/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Flags for return from lfs_fastvget */
#define FVG_UNLOCK	0x01	/* Needs to be unlocked */
#define FVG_PUT		0x02	/* Needs to be vput() */
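
/*
 * Illustrative sketch (an editorial addition, not original code): a
 * caller of lfs_fastvget() is expected to release the vnode according
 * to the flags returned in need_unlock, roughly the way lfs_markv()
 * below does it.  The variable names here are hypothetical.
 *
 *	int flags;
 *	struct vnode *vp;
 *
 *	if (lfs_fastvget(mntp, ino, daddr, &vp, NULL, &flags) == 0) {
 *		... use vp ...
 *		if (flags)
 *			VOP_UNLOCK(vp, 0);
 *		lfs_vunref(vp);
 *	}
 */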

/* Max block count for lfs_markv() */
#define MARKV_MAXBLKCNT	65536

struct buf *lfs_fakebuf(struct vnode *, int, size_t, caddr_t);
int lfs_fasthashget(dev_t, ino_t, int *, struct vnode **);

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
int verbose_debug = 0;

pid_t lfs_cleaner_pid = 0;

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define LFS_FORCE_WRITE	UNASSIGNED

#define LFS_VREF_THRESHOLD 128

static int lfs_bmapv(struct proc *, fsid_t *, BLOCK_INFO *, int);
static int lfs_markv(struct proc *, fsid_t *, BLOCK_INFO *, int);

/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 * 0 on success
 * -1/errno is returned on error.
 */
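/*
 * Illustrative user-space sketch (an editorial addition, not original
 * code): a cleaner process would typically rewrite a live block with
 * the lfs_markv(2) syscall along these lines.  The variable names are
 * hypothetical; the BLOCK_INFO fields match the copy loops below.
 *
 *	BLOCK_INFO bi;
 *
 *	bi.bi_inode     = ino;		file the block belongs to
 *	bi.bi_lbn       = lbn;		logical block number in that file
 *	bi.bi_daddr     = daddr;	where the cleaner found the block
 *	bi.bi_segcreate = segcreate;	create time of the source segment
 *	bi.bi_version   = version;	inode version, from the ifile
 *	bi.bi_bp        = buf;		block contents, in user memory
 *	bi.bi_size      = size;
 *	if (lfs_markv(&fsid, &bi, 1) < 0)
 *		err(1, "lfs_markv");
 */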
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, *nbp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
#ifdef DEBUG_LFS
	int vputc = 0, iwritten = 0;
#endif
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	int cnt, error, lfs_fastvget_unlock;
	int do_again = 0;
	int s;
#ifdef CHECK_COPYIN
	int i;
#endif /* CHECK_COPYIN */
#ifdef LFS_TRACK_IOS
	int j;
#endif
	int numlocked = 0, numrefed = 0;
	ino_t maxino;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	maxino = (fragstoblks(fs, fsbtofrags(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks)) -
		  fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock, so that our blocks cannot become invalid
	 * while we might have to sleep.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);

	/* Mark blocks/inodes dirty.  */
	error = 0;

#ifdef DEBUG_LFS
	/* Run through and count the inodes */
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp) {
		if (lastino != blkp->bi_inode) {
			lastino = blkp->bi_inode;
			vputc++;
		}
	}
	cnt = blkcnt;
	printf("[%d/", vputc);
	iwritten = 0;
#endif /* DEBUG_LFS */
	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		if (blkp->bi_daddr == LFS_FORCE_WRITE)
			printf("lfs_markv: warning: force-writing ino %d lbn %d\n",
			       blkp->bi_inode, blkp->bi_lbn);
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for (j = 0; j < LFS_THROTTLE; j++) {
			if (fs->lfs_pending[j] != LFS_UNUSED_DADDR
			    && dtosn(fs, fs->lfs_pending[j]) == dtosn(fs, blkp->bi_daddr)
			    && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				printf("lfs_markv: attempt to clean pending segment? (#%d)\n",
				       dtosn(fs, fs->lfs_pending[j]));
				/* return (EBUSY); */
			}
		}
#endif /* LFS_TRACK_IOS */
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto again;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
				if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
					iwritten++;
#endif
				if (lfs_fastvget_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			/* Don't force-write the ifile */
			if (blkp->bi_inode == LFS_IFILE_INUM
			    && blkp->bi_daddr == LFS_FORCE_WRITE)
			{
				continue;
			}
			if (v_daddr == LFS_UNUSED_DADDR
			    && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				continue;
			}

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					     &vp,
					     (blkp->bi_lbn == LFS_UNUSED_LBN
					      ? blkp->bi_bp
					      : NULL),
					     &lfs_fastvget_unlock);
			if (lfs_fastvget_unlock)
				numlocked++;

			if (!error) {
				numrefed++;
			}
			if (error) {
#ifdef DEBUG_LFS
				printf("lfs_markv: lfs_fastvget failed with %d (ino %d, segment %d)\n",
				       error, blkp->bi_inode,
				       dtosn(fs, blkp->bi_daddr));
#endif /* DEBUG_LFS */
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)  But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr
				    || blkp->bi_daddr == LFS_FORCE_WRITE)
				{
					LFS_SET_UINO(ip, IN_CLEANING);
				}
				brelse(bp);
			}
			continue;
		}

		b_daddr = 0;
		if (blkp->bi_daddr != LFS_FORCE_WRITE) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
			    dbtofsb(fs, b_daddr) != blkp->bi_daddr)
			{
				if (dtosn(fs, dbtofsb(fs, b_daddr))
				    == dtosn(fs, blkp->bi_daddr))
				{
					printf("lfs_markv: wrong da same seg: %x vs %x\n",
					       blkp->bi_daddr, dbtofsb(fs, b_daddr));
				}
				continue;
			}
		}
		/*
		 * If we got to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		/*
		 * XXX - if the block we are reading has been *extended* since
		 * it was written to disk, then we risk throwing away
		 * the extension in bread()/getblk().  Check the size
		 * here.
		 */
		if (blkp->bi_size < fs->lfs_bsize) {
			s = splbio();
			bp = incore(vp, blkp->bi_lbn);
			if (bp && bp->b_bcount > blkp->bi_size) {
				printf("lfs_markv: %ld > %d (fixed)\n",
				       bp->b_bcount, blkp->bi_size);
				blkp->bi_size = bp->b_bcount;
			}
			splx(s);
		}
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = fsbtodb(fs, blkp->bi_daddr);
		} else {
			/* Indirect block */
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
		if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
			iwritten++;
#endif
		if (lfs_fastvget_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);

	lfs_segunlock(fs);

#ifdef DEBUG_LFS
	printf("%d]", iwritten);
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	vfs_unbusy(mntp);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

    err2:
	printf("lfs_markv err2\n");
	if (lfs_fastvget_unlock) {
		VOP_UNLOCK(vp, 0);
		--numlocked;
	}
	lfs_vunref(vp);
	--numrefed;

	/* Free up fakebuffers -- have to take these from the LOCKED list */
    again:
	s = splbio();
	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = nbp) {
		nbp = bp->b_freelist.tqe_next;
		if (bp->b_flags & B_CALL) {
			if (bp->b_flags & B_BUSY) { /* not bloody likely */
				bp->b_flags |= B_WANTED;
				tsleep(bp, PRIBIO+1, "markv", 0);
				splx(s);
				goto again;
			}
			if (bp->b_flags & B_DELWRI)
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
			bremfree(bp);
			splx(s);
			brelse(bp);
			s = splbio();
		}
	}
	splx(s);
	lfs_segunlock(fs);
	vfs_unbusy(mntp);
#ifdef DEBUG_LFS
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
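/*
 * Illustrative sketch (an editorial addition, not original code): the
 * cleaner decides whether a block is still live by filling bi_daddr
 * with the address at which it found the block, calling lfs_bmapv(2),
 * and comparing the result, roughly:
 *
 *	ufs_daddr_t found = bi.bi_daddr;
 *
 *	if (lfs_bmapv(&fsid, &bi, 1) == 0 && bi.bi_daddr == found)
 *		the block is live; hand it to lfs_markv(2)
 */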
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	ufs_daddr_t v_daddr;
	int cnt, error, need_unlock = 0;
	int numlocked = 0, numrefed = 0;
#ifdef LFS_TRACK_IOS
	int j;
#endif

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOUFS(mntp);
	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOUFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
#ifdef DEBUG
		if (dtosn(fs, fs->lfs_curseg) == dtosn(fs, blkp->bi_daddr)) {
			printf("lfs_bmapv: attempt to clean current segment? (#%d)\n",
			       dtosn(fs, fs->lfs_curseg));
			vfs_unbusy(mntp);
			return (EBUSY);
		}
#endif /* DEBUG */
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for (j = 0; j < LFS_THROTTLE; j++) {
			if (fs->lfs_pending[j] != LFS_UNUSED_DADDR
			    && dtosn(fs, fs->lfs_pending[j]) == dtosn(fs, blkp->bi_daddr))
			{
				printf("lfs_bmapv: attempt to clean pending segment? (#%d)\n",
				       dtosn(fs, fs->lfs_pending[j]));
				vfs_unbusy(mntp);
				return (EBUSY);
			}
		}

#endif /* LFS_TRACK_IOS */
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				if (need_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_flag & VXLOCK)) {
				ip = VTOI(vp);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				}
				numrefed++;
				if (VOP_ISLOCKED(vp)) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: inode %d inlocked\n", ip->i_number);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					lfs_vunref(vp);
					--numrefed;
					continue;
				} else {
					vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
					need_unlock = FVG_UNLOCK;
					numlocked++;
				}
			} else {
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: vget of ino %d failed with %d", blkp->bi_inode, error);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				} else {
					need_unlock = FVG_PUT;
					numlocked++;
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.  Note that we DO NOT set the
			 * bi_daddr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &(blkp->bi_daddr), NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = dbtofsb(fs, blkp->bi_daddr);
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		if (need_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_bmapv: numlocked=%d numrefed=%d", numlocked,
		      numrefed);
	}

	vfs_unbusy(mntp);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
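/*
 * Illustrative sketch (an editorial addition, not original code): once
 * every live block of a segment has been rewritten via lfs_markv(2),
 * the cleaner can return the segment to the free pool:
 *
 *	if (lfs_segclean(&fsid, segnum) < 0)
 *		warn("segment %lu not cleaned", segnum);
 *
 * As the code below shows, EBUSY means the segment is the current or
 * an active one, and EALREADY means it was already clean.
 */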
int
sys_lfs_segclean(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;

	if (dtosn(fs, fs->lfs_curseg) == SCARG(uap, segment))
		return (EBUSY);

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);
	LFS_SEGENTRY(sup, fs, SCARG(uap, segment), bp);
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		brelse(bp);
		vfs_unbusy(mntp);
		return (EALREADY);
	}

	fs->lfs_avail += segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && SCARG(uap, segment) == 0 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
	fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	cip->avail = fs->lfs_avail - fs->lfs_ravail;
	(void) VOP_BWRITE(bp);
	wakeup(&fs->lfs_avail);
	vfs_unbusy(mntp);

	return (0);
}

/*
 * sys_lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified, after which the caller is awakened even
 * if no segment has been written.  An fsid of -1 means any file system, and
 * a timeout of 0 means forever.
 *
 * 0 on success
 * 1 on timeout
 * -1/errno is returned on error.
 */
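/*
 * Illustrative sketch (an editorial addition, not original code): a
 * cleaner main loop might block here between passes, with a timeout so
 * that it still wakes up periodically:
 *
 *	struct timeval tv;
 *
 *	tv.tv_sec = 60;		wake up at least once a minute
 *	tv.tv_usec = 0;
 *	lfs_segwait(&fsid, &tv);
 */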
int
sys_lfs_segwait(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	extern int lfs_allclean_wakeup;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		/*
		 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
		 * XXX IS THAT WHAT IS INTENDED?
		 */
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * If we find the vnode on the hash chain, then it may be locked by another
 * process; so we set (*need_unlock) to zero.
 *
 * If we don't, we call ufs_ihashins, which locks the inode, and we set
 * (*need_unlock) to non-zero.
 *
 * In either case we lfs_vref, and it is the caller's responsibility to
 * lfs_vunref and VOP_UNLOCK (if necessary) when finished.
 */
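/*
 * Illustrative sketch (an editorial addition, not original code):
 * lfs_markv() above calls this with the cleaner-supplied on-disk inode
 * when it has one (bi_lbn == LFS_UNUSED_LBN), and with a NULL dinp
 * otherwise, in which case the inode is bread() in from daddr:
 *
 *	error = lfs_fastvget(mntp, ino, daddr, &vp,
 *			     have_dinode ? dinp : NULL, &need_unlock);
 *
 * (have_dinode is hypothetical; see the call in lfs_markv for the real
 * condition.)
 */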
extern struct lock ufs_hashlock;

int
lfs_fasthashget(dev_t dev, ino_t ino, int *need_unlock, struct vnode **vpp)
{
	struct inode *ip;

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
		if ((*vpp)->v_flag & VXLOCK) {
			printf("lfs_fastvget: vnode VXLOCKed for ino %d\n",
			       ino);
			clean_vnlocked++;
#ifdef LFS_EAGAIN_FAIL
			return EAGAIN;
#endif
		}
		ip = VTOI(*vpp);
		if (lfs_vref(*vpp)) {
			clean_inlocked++;
			return EAGAIN;
		}
		if (VOP_ISLOCKED(*vpp)) {
#ifdef DEBUG_LFS
			printf("lfs_fastvget: ino %d inlocked by pid %d\n",
			       ip->i_number, (*vpp)->v_lock.lk_lockholder);
#endif
			clean_inlocked++;
#ifdef LFS_EAGAIN_FAIL
			lfs_vunref(*vpp);
			return EAGAIN;
#endif /* LFS_EAGAIN_FAIL */
		} else {
			vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
			*need_unlock |= FVG_UNLOCK;
		}
	} else
		*vpp = NULL;

	return (0);
}

int
lfs_fastvget(struct mount *mp, ino_t ino, ufs_daddr_t daddr, struct vnode **vpp, struct dinode *dinp, int *need_unlock)
{
	struct inode *ip;
	struct vnode *vp;
	struct ufsmount *ump;
	dev_t dev;
	int error;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;
	*need_unlock = 0;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.  This prevents possible problems with roll-forward.
	 */
	while (fs->lfs_flags & LFS_NOTYET) {
		tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
	}
	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, need_unlock, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	do {
		error = lfs_fasthashget(dev, ino, need_unlock, vpp);
		if (error != 0 || *vpp != NULL) {
			ungetnewvnode(vp);
			return (error);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
		if (error) {
			printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		error = bread(ump->um_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
			      NOCRED, &bp);
		if (error) {
			printf("lfs_fastvget: bread failed with %d\n", error);
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		ip->i_din.ffs_din = *lfs_ifind(fs, ino, bp);
		brelse(bp);
	}
	ip->i_ffs_effnlink = ip->i_ffs_nlink;
	ip->i_lfs_effnblks = ip->i_ffs_blocks;

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
#ifdef DEBUG_LFS
	if (vp->v_type == VNON) {
		printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
		       ip->i_number, (ip->i_ffs_mode & IFMT) >> 12, dinp);
		lfs_dump_dinode(&ip->i_din.ffs_din);
#ifdef DDB
		Debugger();
#endif
	}
#endif /* DEBUG_LFS */
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	*need_unlock |= FVG_PUT;

	uvm_vnp_setsize(vp, ip->i_ffs_size);

	return (0);
}

struct buf *
lfs_fakebuf(struct vnode *vp, int lbn, size_t size, caddr_t uaddr)
{
	struct buf *bp;
	int error;

#ifndef ALLOW_VFLUSH_CORRUPTION
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(bp);
		return NULL;
	}
#else
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, 0);
	bp->b_flags |= B_INVAL;
	bp->b_saveaddr = uaddr;
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}