/*	$NetBSD: lfs_syscalls.c,v 1.58 2001/08/03 06:02:42 jdolecek Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#define LFS		/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Flags for return from lfs_fastvget */
#define FVG_UNLOCK	0x01	/* Needs to be unlocked */
#define FVG_PUT		0x02	/* Needs to be vput() */

/* Max block count for lfs_markv() */
#define MARKV_MAXBLKCNT	65536

struct buf *lfs_fakebuf(struct vnode *, int, size_t, caddr_t);
int lfs_fasthashget(dev_t, ino_t, int *, struct vnode **);

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
int verbose_debug = 0;

pid_t lfs_cleaner_pid = 0;

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define LFS_FORCE_WRITE	UNASSIGNED

#define LFS_VREF_THRESHOLD 128

static int lfs_bmapv(struct proc *, fsid_t *, BLOCK_INFO *, int);
static int lfs_markv(struct proc *, fsid_t *, BLOCK_INFO *, int);
/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 * 0 on success
 * -1/errno is returned on error.
 */
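/*
 * Hypothetical userland sketch (illustrative only, not part of this file):
 * a cleaner that has decided which blocks are live might hand them to the
 * kernel roughly like this.  The variable names and the error handling are
 * assumptions, not part of the interface definition above.
 *
 *	fsid_t fsid;				(from statfs(2) on the mount)
 *	BLOCK_INFO blk[MARKV_MAXBLKCNT];	(live blocks to be rewritten)
 *	int nblk;				(entries actually filled in)
 *
 *	if (lfs_markv(&fsid, blk, nblk) < 0)
 *		err(1, "lfs_markv");
 */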
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, *nbp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
#ifdef DEBUG_LFS
	int vputc = 0, iwritten = 0;
#endif
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	int cnt, error, lfs_fastvget_unlock;
	int do_again = 0;
	int s;
#ifdef CHECK_COPYIN
	int i;
#endif /* CHECK_COPYIN */
#ifdef LFS_TRACK_IOS
	int j;
#endif
	int numlocked = 0, numrefed = 0;
	ino_t maxino;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	maxino = (fragstoblks(fs, fsbtofrags(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks)) -
		  fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);
	/*
	 * Take the seglock here so that the blocks we are asked to clean
	 * cannot become invalid while we might have to sleep.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);

	/* Mark blocks/inodes dirty.  */
	error = 0;

#ifdef DEBUG_LFS
	/* Run through and count the inodes */
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp) {
		if (lastino != blkp->bi_inode) {
			lastino = blkp->bi_inode;
			vputc++;
		}
	}
	cnt = blkcnt;
	printf("[%d/", vputc);
	iwritten = 0;
#endif /* DEBUG_LFS */
	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		if (blkp->bi_daddr == LFS_FORCE_WRITE)
			printf("lfs_markv: warning: force-writing ino %d lbn %d\n",
			       blkp->bi_inode, blkp->bi_lbn);
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for (j = 0; j < LFS_THROTTLE; j++) {
			if (fs->lfs_pending[j] != LFS_UNUSED_DADDR
			   && dtosn(fs, fs->lfs_pending[j]) == dtosn(fs, blkp->bi_daddr)
			   && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				printf("lfs_markv: attempt to clean pending segment? (#%d)\n",
				       dtosn(fs, fs->lfs_pending[j]));
				/* return (EBUSY); */
			}
		}
#endif /* LFS_TRACK_IOS */
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto again;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
				if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
					iwritten++;
#endif
				if (lfs_fastvget_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			/* Don't force-write the ifile */
			if (blkp->bi_inode == LFS_IFILE_INUM
			   && blkp->bi_daddr == LFS_FORCE_WRITE)
			{
				continue;
			}
			if (v_daddr == LFS_UNUSED_DADDR
			   && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				continue;
			}

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					     &vp,
					     (blkp->bi_lbn == LFS_UNUSED_LBN
					      ? blkp->bi_bp
					      : NULL),
					     &lfs_fastvget_unlock);
			if (lfs_fastvget_unlock)
				numlocked++;

			if (!error) {
				numrefed++;
			}
			if (error) {
#ifdef DEBUG_LFS
				printf("lfs_markv: lfs_fastvget failed with %d (ino %d, segment %d)\n",
				       error, blkp->bi_inode,
				       dtosn(fs, blkp->bi_daddr));
#endif /* DEBUG_LFS */
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)  But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr
				   || blkp->bi_daddr == LFS_FORCE_WRITE)
				{
					LFS_SET_UINO(ip, IN_CLEANING);
				}
				brelse(bp);
			}
			continue;
		}

		b_daddr = 0;
		if (blkp->bi_daddr != LFS_FORCE_WRITE) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
			    dbtofsb(fs, b_daddr) != blkp->bi_daddr)
			{
				if (dtosn(fs, dbtofsb(fs, b_daddr))
				   == dtosn(fs, blkp->bi_daddr))
				{
					printf("lfs_markv: wrong da same seg: %x vs %x\n",
					       blkp->bi_daddr, dbtofsb(fs, b_daddr));
				}
				continue;
			}
		}
		/*
		 * If we got to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		/*
		 * XXX - if the block we are reading has been *extended* since
		 * it was written to disk, then we risk throwing away
		 * the extension in bread()/getblk().  Check the size
		 * here.
		 */
		if (blkp->bi_size < fs->lfs_bsize) {
			s = splbio();
			bp = incore(vp, blkp->bi_lbn);
			if (bp && bp->b_bcount > blkp->bi_size) {
				printf("lfs_markv: %ld > %d (fixed)\n",
				       bp->b_bcount, blkp->bi_size);
				blkp->bi_size = bp->b_bcount;
			}
			splx(s);
		}
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = fsbtodb(fs, blkp->bi_daddr);
		} else {
			/* Indirect block */
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
		if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
			iwritten++;
#endif
		if (lfs_fastvget_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);

	lfs_segunlock(fs);

#ifdef DEBUG_LFS
	printf("%d]", iwritten);
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	vfs_unbusy(mntp);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

    err2:
	printf("lfs_markv err2\n");
	if (lfs_fastvget_unlock) {
		VOP_UNLOCK(vp, 0);
		--numlocked;
	}
	lfs_vunref(vp);
	--numrefed;

	/* Free up fakebuffers -- have to take these from the LOCKED list */
    again:
	s = splbio();
	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = nbp) {
		nbp = bp->b_freelist.tqe_next;
		if (bp->b_flags & B_CALL) {
			if (bp->b_flags & B_BUSY) { /* not bloody likely */
				bp->b_flags |= B_WANTED;
				tsleep(bp, PRIBIO+1, "markv", 0);
				splx(s);
				goto again;
			}
			if (bp->b_flags & B_DELWRI)
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
			bremfree(bp);
			splx(s);
			brelse(bp);
			s = splbio();
		}
	}
	splx(s);
	lfs_segunlock(fs);
	vfs_unbusy(mntp);
#ifdef DEBUG_LFS
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
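/*
 * Hypothetical cleaner flow (illustrative only): bi_inode, bi_lbn and
 * bi_daddr are filled in from the segment being examined; lfs_bmapv(2)
 * then reports where each block lives *now*, so the cleaner keeps only
 * the still-live blocks for lfs_markv(2).  "seg_daddr" below is an
 * assumed per-entry value remembered from the segment summary, not a
 * real field of BLOCK_INFO.
 *
 *	if (lfs_bmapv(&fsid, blk, nblk) == 0)
 *		for (i = 0; i < nblk; i++)
 *			if (blk[i].bi_daddr == seg_daddr[i])
 *				(block is live; keep it for lfs_markv)
 */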
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	ufs_daddr_t v_daddr;
	int cnt, error, need_unlock = 0;
	int numlocked = 0, numrefed = 0;
#ifdef LFS_TRACK_IOS
	int j;
#endif

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOUFS(mntp);
	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOUFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
#ifdef DEBUG
		if (dtosn(fs, fs->lfs_curseg) == dtosn(fs, blkp->bi_daddr)) {
			printf("lfs_bmapv: attempt to clean current segment? (#%d)\n",
			       dtosn(fs, fs->lfs_curseg));
			vfs_unbusy(mntp);
			return (EBUSY);
		}
#endif /* DEBUG */
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for (j = 0; j < LFS_THROTTLE; j++) {
			if (fs->lfs_pending[j] != LFS_UNUSED_DADDR
			   && dtosn(fs, fs->lfs_pending[j]) == dtosn(fs, blkp->bi_daddr))
			{
				printf("lfs_bmapv: attempt to clean pending segment? (#%d)\n",
				       dtosn(fs, fs->lfs_pending[j]));
				vfs_unbusy(mntp);
				return (EBUSY);
			}
		}

#endif /* LFS_TRACK_IOS */
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				if (need_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_flag & VXLOCK)) {
				ip = VTOI(vp);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				}
				numrefed++;
				if (VOP_ISLOCKED(vp)) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: inode %d inlocked\n", ip->i_number);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					lfs_vunref(vp);
					--numrefed;
					continue;
				} else {
					vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
					need_unlock = FVG_UNLOCK;
					numlocked++;
				}
			} else {
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: vget of ino %d failed with %d", blkp->bi_inode, error);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				} else {
					need_unlock = FVG_PUT;
					numlocked++;
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.  Note that we DO NOT set the
			 * bi_daddr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &(blkp->bi_daddr), NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = dbtofsb(fs, blkp->bi_daddr);
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		if (need_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_bmapv: numlocked=%d numrefed=%d", numlocked,
		      numrefed);
	}

	vfs_unbusy(mntp);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
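/*
 * Hypothetical follow-up to a successful lfs_markv(2) (illustrative
 * only): once every live block of segment "sn" has been rewritten
 * elsewhere, the cleaner can reclaim the segment.
 *
 *	if (lfs_segclean(&fsid, sn) < 0)
 *		warn("lfs_segclean");
 */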
int
sys_lfs_segclean(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;

	if (dtosn(fs, fs->lfs_curseg) == SCARG(uap, segment))
		return (EBUSY);

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);
	LFS_SEGENTRY(sup, fs, SCARG(uap, segment), bp);
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		brelse(bp);
		vfs_unbusy(mntp);
		return (EALREADY);
	}

	fs->lfs_avail += segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && SCARG(uap, segment) == 0 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
	fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	cip->avail = fs->lfs_avail - fs->lfs_ravail;
	(void) VOP_BWRITE(bp);
	wakeup(&fs->lfs_avail);
	vfs_unbusy(mntp);

	return (0);
}

/*
 * sys_lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified, which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 * 0 on success
 * 1 on timeout
 * -1/errno is returned on error.
 */
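/*
 * Hypothetical cleaner main loop (illustrative only; the 5-second timeout
 * and the clean_segments() helper are assumptions).  Note that despite the
 * comment above, the timeout is passed as a struct timeval, as the copyin
 * below expects:
 *
 *	struct timeval tv = { 5, 0 };
 *
 *	for (;;) {
 *		clean_segments();
 *		lfs_segwait(&fsid, &tv);
 *	}
 */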
int
sys_lfs_segwait(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	extern int lfs_allclean_wakeup;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		/*
		 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
		 * XXX IS THAT WHAT IS INTENDED?
		 */
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * If we find the vnode on the hash chain, then it may be locked by another
 * process; so we set (*need_unlock) to zero.
 *
 * If we don't, we call ufs_ihashins, which locks the inode, and we set
 * (*need_unlock) to non-zero.
 *
 * In either case we lfs_vref, and it is the caller's responsibility to
 * lfs_vunref and VOP_UNLOCK (if necessary) when finished.
 */
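/*
 * In-kernel caller sketch (illustrative only), mirroring how lfs_markv
 * above uses the returned flags:
 *
 *	int flags = 0;
 *
 *	if (lfs_fastvget(mntp, ino, daddr, &vp, NULL, &flags) == 0) {
 *		(... use vp ...)
 *		if (flags)
 *			VOP_UNLOCK(vp, 0);
 *		lfs_vunref(vp);
 *	}
 */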
extern struct lock ufs_hashlock;

int
lfs_fasthashget(dev_t dev, ino_t ino, int *need_unlock, struct vnode **vpp)
{
	struct inode *ip;

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
		if ((*vpp)->v_flag & VXLOCK) {
			printf("lfs_fastvget: vnode VXLOCKed for ino %d\n",
			       ino);
			clean_vnlocked++;
#ifdef LFS_EAGAIN_FAIL
			return EAGAIN;
#endif
		}
		ip = VTOI(*vpp);
		if (lfs_vref(*vpp)) {
			clean_inlocked++;
			return EAGAIN;
		}
		if (VOP_ISLOCKED(*vpp)) {
#ifdef DEBUG_LFS
			printf("lfs_fastvget: ino %d inlocked by pid %d\n",
			       ip->i_number, (*vpp)->v_lock.lk_lockholder);
#endif
			clean_inlocked++;
#ifdef LFS_EAGAIN_FAIL
			lfs_vunref(*vpp);
			return EAGAIN;
#endif /* LFS_EAGAIN_FAIL */
		} else {
			vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
			*need_unlock |= FVG_UNLOCK;
		}
	} else
		*vpp = NULL;

	return (0);
}

int
lfs_fastvget(struct mount *mp, ino_t ino, ufs_daddr_t daddr, struct vnode **vpp, struct dinode *dinp, int *need_unlock)
{
	struct inode *ip;
	struct vnode *vp;
	struct ufsmount *ump;
	dev_t dev;
	int error;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;
	*need_unlock = 0;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.  This prevents possible problems with roll-forward.
	 */
	while (fs->lfs_flags & LFS_NOTYET) {
		tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
	}
	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, need_unlock, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	do {
		error = lfs_fasthashget(dev, ino, need_unlock, vpp);
		if (error != 0 || *vpp != NULL) {
			ungetnewvnode(vp);
			return (error);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
		if (error) {
			printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		error = bread(ump->um_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
			      NOCRED, &bp);
		if (error) {
			printf("lfs_fastvget: bread failed with %d\n", error);
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		ip->i_din.ffs_din = *lfs_ifind(fs, ino, bp);
		brelse(bp);
	}
	ip->i_ffs_effnlink = ip->i_ffs_nlink;
	ip->i_lfs_effnblks = ip->i_ffs_blocks;

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	error = ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
	if (error) {
		/* This CANNOT happen (see ufs_vinit) */
		printf("lfs_fastvget: ufs_vinit returned %d for ino %d\n", error, ino);
		lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
		lfs_vunref(vp);
		*vpp = NULL;
		return (error);
	}
#ifdef DEBUG_LFS
	if (vp->v_type == VNON) {
		printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
		       ip->i_number, (ip->i_ffs_mode & IFMT) >> 12, dinp);
		lfs_dump_dinode(&ip->i_din.ffs_din);
#ifdef DDB
		Debugger();
#endif
	}
#endif /* DEBUG_LFS */
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	*need_unlock |= FVG_PUT;

	uvm_vnp_setsize(vp, ip->i_ffs_size);

	return (0);
}

struct buf *
lfs_fakebuf(struct vnode *vp, int lbn, size_t size, caddr_t uaddr)
{
	struct buf *bp;
	int error;

#ifndef ALLOW_VFLUSH_CORRUPTION
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(bp);
		return NULL;
	}
#else
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, 0);
	bp->b_flags |= B_INVAL;
	bp->b_saveaddr = uaddr;
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}