/*	$NetBSD: lfs_syscalls.c,v 1.72 2002/11/24 08:27:00 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.72 2002/11/24 08:27:00 yamt Exp $");

#define LFS		/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Flags for return from lfs_fastvget */
#define FVG_UNLOCK	0x01	/* Needs to be unlocked */
#define FVG_PUT		0x02	/* Needs to be vput() */

/* Max block count for lfs_markv() */
#define MARKV_MAXBLKCNT		65536

struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, caddr_t);
int lfs_fasthashget(dev_t, ino_t, int *, struct vnode **);

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
int verbose_debug = 0;

pid_t lfs_cleaner_pid = 0;

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define LFS_FORCE_WRITE UNASSIGNED

#define LFS_VREF_THRESHOLD 128

static int lfs_bmapv(struct proc *, fsid_t *, BLOCK_INFO *, int);
static int lfs_markv(struct proc *, fsid_t *, BLOCK_INFO *, int);
/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written. The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 *  0 on success
 * -1/errno is returned on error.
 */
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO));
out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
	    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO_15));
	}
out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

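/*
 * The real work of lfs_markv: mark the blocks and inodes described by the
 * (already copied-in) BLOCK_INFO array dirty and write them to the log,
 * flushing the result with a synchronous checkpoint write.
 */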
static int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, *nbp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
#ifdef DEBUG_LFS
	int vputc = 0, iwritten = 0;
#endif
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	int cnt, error, lfs_fastvget_unlock;
	int do_again = 0;
	int s;
#ifdef CHECK_COPYIN
	int i;
#endif /* CHECK_COPYIN */
	int numlocked = 0, numrefed = 0;
	ino_t maxino;
	size_t obsize;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	maxino = (fragstoblks(fs, fsbtofrags(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks)) -
		  fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock here so that, even though we may have to sleep,
	 * our blocks cannot become invalid out from under us in the
	 * meantime.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty. */
	error = 0;

#ifdef DEBUG_LFS
	/* Run through and count the inodes */
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp) {
		if (lastino != blkp->bi_inode) {
			lastino = blkp->bi_inode;
			vputc++;
		}
	}
	cnt = blkcnt;
	printf("[%d/", vputc);
	iwritten = 0;
#endif /* DEBUG_LFS */
	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		if (blkp->bi_daddr == LFS_FORCE_WRITE)
			printf("lfs_markv: warning: force-writing ino %d lbn %d\n",
			       blkp->bi_inode, blkp->bi_lbn);
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto again;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one. The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
				if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
					iwritten++;
#endif
				if (lfs_fastvget_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			/* Don't force-write the ifile */
			if (blkp->bi_inode == LFS_IFILE_INUM
			    && blkp->bi_daddr == LFS_FORCE_WRITE)
			{
				continue;
			}
			if (v_daddr == LFS_UNUSED_DADDR
			    && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				continue;
			}

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					     &vp,
					     (blkp->bi_lbn == LFS_UNUSED_LBN
					      ? blkp->bi_bp
					      : NULL),
					     &lfs_fastvget_unlock);
			if (lfs_fastvget_unlock)
				numlocked++;

			if (!error) {
				numrefed++;
			}
			if (error) {
#ifdef DEBUG_LFS
				printf("lfs_markv: lfs_fastvget failed with %d (ino %d, segment %d)\n",
				       error, blkp->bi_inode,
				       dtosn(fs, blkp->bi_daddr));
#endif /* DEBUG_LFS */
				/*
				 * If we got EAGAIN, that means that the
				 * inode was locked. This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another. (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.) But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked). Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr
				    || blkp->bi_daddr == LFS_FORCE_WRITE)
				{
					LFS_SET_UINO(ip, IN_CLEANING);
				}
				brelse(bp);
			}
			continue;
		}

		b_daddr = 0;
		if (blkp->bi_daddr != LFS_FORCE_WRITE) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
			    dbtofsb(fs, b_daddr) != blkp->bi_daddr)
			{
				if (dtosn(fs, dbtofsb(fs, b_daddr))
				    == dtosn(fs, blkp->bi_daddr))
				{
					printf("lfs_markv: wrong da same seg: %x vs %x\n",
					       blkp->bi_daddr, dbtofsb(fs, b_daddr));
				}
				do_again++;
				continue;
			}
		}

		/*
		 * Check block sizes. The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = fs->lfs_bsize;
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			printf("lfs_markv: ino %d lbn %d wrong size (%ld != %d), try again\n",
			       blkp->bi_inode, blkp->bi_lbn,
			       (long) obsize, blkp->bi_size);
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block. If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section. If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = fsbtodb(fs, blkp->bi_daddr);
		} else {
			/* Indirect block */
			if (blkp->bi_size != fs->lfs_bsize)
				panic("lfs_markv: partial indirect block?"
				      " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty. So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
		if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
			iwritten++;
#endif
		if (lfs_fastvget_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

#ifdef DEBUG_LFS
	printf("%d]", iwritten);
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	vfs_unbusy(mntp);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

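	/*
	 * Error path: release our hold on the current vnode, then fall
	 * through to free any fake buffers we queued before unlocking the
	 * segment and returning.
	 */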
err2:
	printf("lfs_markv err2\n");
	if (lfs_fastvget_unlock) {
		VOP_UNLOCK(vp, 0);
		--numlocked;
	}
	lfs_vunref(vp);
	--numrefed;

	/* Free up fakebuffers -- have to take these from the LOCKED list */
again:
	s = splbio();
	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = nbp) {
		nbp = bp->b_freelist.tqe_next;
		if (bp->b_flags & B_CALL) {
			if (bp->b_flags & B_BUSY) { /* not bloody likely */
				bp->b_flags |= B_WANTED;
				tsleep(bp, PRIBIO+1, "markv", 0);
				splx(s);
				goto again;
			}
			if (bp->b_flags & B_DELWRI)
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
			bremfree(bp);
			splx(s);
			brelse(bp);
			s = splbio();
		}
	}
	splx(s);
	lfs_segunlock(fs);
	vfs_unbusy(mntp);
#ifdef DEBUG_LFS
	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for an array of blocks.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO));
out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
	    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO_15));
	}
out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

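/*
 * The real work of lfs_bmapv: look up the current disk address (and, for
 * data blocks, the block size) of every block in the copied-in BLOCK_INFO
 * array, so the cleaner can tell which blocks are still live.
 */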
static int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	ufs_daddr_t v_daddr;
	int cnt, error, need_unlock = 0;
	int numlocked = 0, numrefed = 0;

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOUFS(mntp);
	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOUFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one. The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				if (need_unlock) {
					VOP_UNLOCK(vp, 0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here. Instead, we try an unlocked access.
			 */
			vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_flag & VXLOCK)) {
				ip = VTOI(vp);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				}
				numrefed++;
				if (VOP_ISLOCKED(vp)) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: inode %d inlocked\n", ip->i_number);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					lfs_vunref(vp);
					--numrefed;
					continue;
				} else {
					vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
					need_unlock = FVG_UNLOCK;
					numlocked++;
				}
			} else {
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: vget of ino %d failed with %d", blkp->bi_inode, error);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
					continue;
				} else {
					need_unlock = FVG_PUT;
					numlocked++;
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going. Note that we DO NOT set the
			 * bi_daddr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &(blkp->bi_daddr), NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = dbtofsb(fs, blkp->bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one. The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		if (need_unlock) {
			VOP_UNLOCK(vp, 0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	if (numlocked != 0 || numrefed != 0) {
		panic("lfs_bmapv: numlocked=%d numrefed=%d", numlocked,
		      numrefed);
	}

	vfs_unbusy(mntp);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
int
sys_lfs_segclean(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if (dtosn(fs, fs->lfs_curseg) == segnum)
		return (EBUSY);

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);
#ifdef LFS_AGGRESSIVE_SEGLOCK
	lfs_seglock(fs, SEGM_PROT);
#endif
	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		printf("lfs_segclean: not cleaning segment %lu: %d live bytes\n",
		       segnum, sup->su_nbytes);
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EALREADY);
	}

	fs->lfs_avail += segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && segnum == 0 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
	fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) LFS_BWRITE_LOG(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	cip->avail = fs->lfs_avail - fs->lfs_ravail;
	(void) LFS_BWRITE_LOG(bp);
	wakeup(&fs->lfs_avail);
#ifdef LFS_AGGRESSIVE_SEGLOCK
	lfs_segunlock(fs);
#endif
	vfs_unbusy(mntp);

	return (0);
}

/*
 * sys_lfs_segwait:
 *
 * This will block until a segment in file system fsid is written. A timeout
 * in milliseconds may be specified which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
int
sys_lfs_segwait(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		/*
		 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
		 * XXX IS THAT WHAT IS INTENDED?
		 */
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner. The cleaner already knows the
 * daddr from the ifile, so don't look it up again. If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * If we find the vnode on the hash chain, then it may be locked by another
 * process; so we set (*need_unlock) to zero.
 *
 * If we don't, we call ufs_ihashins, which locks the inode, and we set
 * (*need_unlock) to non-zero.
 *
 * In either case we lfs_vref, and it is the caller's responsibility to
 * lfs_vunref and VOP_UNLOCK (if necessary) when finished.
 */
extern struct lock ufs_hashlock;

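/*
 * Try to find the inode in the inode hash table without touching the disk.
 * If it is there, take a reference; lock it and set FVG_UNLOCK in
 * *need_unlock if no one else already holds it locked. If it is not in the
 * hash, set *vpp to NULL and let the caller build the vnode itself.
 */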
int
lfs_fasthashget(dev_t dev, ino_t ino, int *need_unlock, struct vnode **vpp)
{
	struct inode *ip;

	/*
	 * This is playing fast and loose. Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
		if ((*vpp)->v_flag & VXLOCK) {
			printf("lfs_fastvget: vnode VXLOCKed for ino %d\n",
			       ino);
			clean_vnlocked++;
#ifdef LFS_EAGAIN_FAIL
			return EAGAIN;
#endif
		}
		ip = VTOI(*vpp);
		if (lfs_vref(*vpp)) {
			clean_inlocked++;
			return EAGAIN;
		}
		if (VOP_ISLOCKED(*vpp)) {
#ifdef DEBUG_LFS
			printf("lfs_fastvget: ino %d inlocked by pid %d\n",
			       ip->i_number, (*vpp)->v_lock.lk_lockholder);
#endif
			clean_inlocked++;
#ifdef LFS_EAGAIN_FAIL
			lfs_vunref(*vpp);
			return EAGAIN;
#endif /* LFS_EAGAIN_FAIL */
		} else {
			vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
			*need_unlock |= FVG_UNLOCK;
		}
	} else
		*vpp = NULL;

	return (0);
}

int
lfs_fastvget(struct mount *mp, ino_t ino, ufs_daddr_t daddr, struct vnode **vpp, struct dinode *dinp, int *need_unlock)
{
	struct inode *ip;
	struct dinode *dip;
	struct vnode *vp;
	struct ufsmount *ump;
	dev_t dev;
	int i, error, retries;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;
	*need_unlock = 0;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete. This prevents possible problems with roll-forward.
	 */
	while (fs->lfs_flags & LFS_NOTYET) {
		tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
	}
	/*
	 * This is playing fast and loose. Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, need_unlock, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	do {
		error = lfs_fasthashget(dev, ino, need_unlock, vpp);
		if (error != 0 || *vpp != NULL) {
			ungetnewvnode(vp);
			return (error);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
		if (error) {
			printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		retries = 0;
again:
		error = bread(ump->um_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
			      NOCRED, &bp);
		if (error) {
			printf("lfs_fastvget: bread failed with %d\n", error);
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		dip = lfs_ifind(ump->um_lfs, ino, bp);
		if (dip == NULL) {
			/* Assume write has not completed yet; try again */
			bp->b_flags |= B_INVAL;
			brelse(bp);
			++retries;
			if (retries > LFS_IFIND_RETRIES)
				panic("lfs_fastvget: dinode not found");
			printf("lfs_fastvget: dinode not found, retrying...\n");
			goto again;
		}
		ip->i_din.ffs_din = *dip;
		brelse(bp);
	}
	ip->i_ffs_effnlink = ip->i_ffs_nlink;
	ip->i_lfs_effnblks = ip->i_ffs_blocks;
	ip->i_lfs_osize = ip->i_ffs_size;

	memset(ip->i_lfs_fragsize, 0, NDADDR * sizeof(*ip->i_lfs_fragsize));
	for (i = 0; i < NDADDR; i++)
		if (ip->i_ffs_db[i] != 0)
			ip->i_lfs_fragsize[i] = blksize(fs, ip, i);

	/*
	 * Initialize the vnode from the inode, check for aliases. In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
#ifdef DEBUG_LFS
	if (vp->v_type == VNON) {
		printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
		       ip->i_number, (ip->i_ffs_mode & IFMT) >> 12, dinp);
		lfs_dump_dinode(&ip->i_din.ffs_din);
#ifdef DDB
		Debugger();
#endif
	}
#endif /* DEBUG_LFS */
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	genfs_node_init(vp, &lfs_genfsops);
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	*need_unlock |= FVG_PUT;

	uvm_vnp_setsize(vp, ip->i_ffs_size);

	return (0);
}

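/*
 * Construct a "fake" buffer holding the cleaner's copy of a data block:
 * either copy the block contents in from the cleaner's address space now,
 * or (with ALLOW_VFLUSH_CORRUPTION) stash the user address in b_saveaddr
 * and leave the copyin to the segment writer.
 */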
struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, caddr_t uaddr)
{
	struct buf *bp;
	int error;

#ifndef ALLOW_VFLUSH_CORRUPTION
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(bp);
		return NULL;
	}
#else
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, 0);
	bp->b_flags |= B_INVAL;
	bp->b_saveaddr = uaddr;
#endif
#if 0
	bp->b_saveaddr = (caddr_t)fs;
	++fs->lfs_iocount;
#endif
	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}