1 /* $NetBSD: lfs_syscalls.c,v 1.75 2002/12/18 14:05:50 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant (at) hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*-
39 * Copyright (c) 1991, 1993, 1994
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by the University of
53 * California, Berkeley and its contributors.
54 * 4. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)lfs_syscalls.c 8.10 (Berkeley) 5/14/95
71 */
72
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.75 2002/12/18 14:05:50 yamt Exp $");
75
76 #define LFS /* for prototypes in syscallargs.h */
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/proc.h>
81 #include <sys/buf.h>
82 #include <sys/mount.h>
83 #include <sys/vnode.h>
84 #include <sys/malloc.h>
85 #include <sys/kernel.h>
86
87 #include <sys/syscallargs.h>
88
89 #include <ufs/ufs/inode.h>
90 #include <ufs/ufs/ufsmount.h>
91 #include <ufs/ufs/ufs_extern.h>
92
93 #include <ufs/lfs/lfs.h>
94 #include <ufs/lfs/lfs_extern.h>
95
96 /* Max block count for lfs_markv() */
97 #define MARKV_MAXBLKCNT 65536
98
99 struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, caddr_t);
100 int lfs_fasthashget(dev_t, ino_t, struct vnode **);
101
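/* Debugging knobs and counters for the cleaner syscalls (see lfs_fasthashget). */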
102 int debug_cleaner = 0;
103 int clean_vnlocked = 0;
104 int clean_inlocked = 0;
105 int verbose_debug = 0;
106
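/*
 * PID of the userland cleaner, recorded by lfs_bmapv.  Other parts of
 * LFS presumably consult this to recognize requests made by the cleaner.
 */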
107 pid_t lfs_cleaner_pid = 0;
108
109 /*
110 * Definitions for the buffer free lists.
111 */
112 #define BQUEUES 4 /* number of free buffer queues */
113
114 #define BQ_LOCKED 0 /* super-blocks &c */
115 #define BQ_LRU 1 /* lru, useful buffers */
116 #define BQ_AGE 2 /* rubbish */
117 #define BQ_EMPTY 3 /* buffer headers with no memory */
118
119 extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
120
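/*
 * Sentinel disk address: the cleaner passes LFS_FORCE_WRITE in bi_daddr
 * to ask that a block be rewritten even if its on-disk address no longer
 * matches (see the checks in lfs_markv below).
 */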
121 #define LFS_FORCE_WRITE UNASSIGNED
122
123 #define LFS_VREF_THRESHOLD 128
124
125 static int lfs_bmapv(struct proc *, fsid_t *, BLOCK_INFO *, int);
126 static int lfs_markv(struct proc *, fsid_t *, BLOCK_INFO *, int);
127 static void lfs_fakebuf_iodone(struct buf *);
128
129 /*
130 * sys_lfs_markv:
131 *
132 * This will mark inodes and blocks dirty, so they are written into the log.
133 * It will block until all the blocks have been written. The segment create
134 * time passed in the block_info and inode_info structures is used to decide
135 * if the data is valid for each block (in case some process dirtied a block
136 * or inode that is being cleaned between the determination that a block is
137 * live and the lfs_markv call).
138 *
139 * 0 on success
140 * -1/errno is returned on error.
141 */
142 #ifdef USE_64BIT_SYSCALLS
143 int
144 sys_lfs_markv(struct proc *p, void *v, register_t *retval)
145 {
146 struct sys_lfs_markv_args /* {
147 syscallarg(fsid_t *) fsidp;
148 syscallarg(struct block_info *) blkiov;
149 syscallarg(int) blkcnt;
150 } */ *uap = v;
151 BLOCK_INFO *blkiov;
152 int blkcnt, error;
153 fsid_t fsid;
154
155 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
156 return (error);
157
158 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
159 return (error);
160
161 blkcnt = SCARG(uap, blkcnt);
162 if ((u_int) blkcnt > MARKV_MAXBLKCNT)
163 return (EINVAL);
164
165 blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
166 if ((error = copyin(SCARG(uap, blkiov), blkiov,
167 blkcnt * sizeof(BLOCK_INFO))) != 0)
168 goto out;
169
170 if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0)
171 copyout(blkiov, SCARG(uap, blkiov),
172 blkcnt * sizeof(BLOCK_INFO));
173 out:
174 free(blkiov, M_SEGMENT);
175 return error;
176 }
177 #else
178 int
179 sys_lfs_markv(struct proc *p, void *v, register_t *retval)
180 {
181 struct sys_lfs_markv_args /* {
182 syscallarg(fsid_t *) fsidp;
183 syscallarg(struct block_info *) blkiov;
184 syscallarg(int) blkcnt;
185 } */ *uap = v;
186 BLOCK_INFO *blkiov;
187 BLOCK_INFO_15 *blkiov15;
188 int i, blkcnt, error;
189 fsid_t fsid;
190
191 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
192 return (error);
193
194 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
195 return (error);
196
197 blkcnt = SCARG(uap, blkcnt);
198 if ((u_int) blkcnt > MARKV_MAXBLKCNT)
199 return (EINVAL);
200
201 blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
202 blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
203 if ((error = copyin(SCARG(uap, blkiov), blkiov15,
204 blkcnt * sizeof(BLOCK_INFO_15))) != 0)
205 goto out;
206
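/*
 * Convert the BLOCK_INFO_15 entries supplied by userland (an older
 * layout of the structure) into the current BLOCK_INFO format.
 */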
207 for (i = 0; i < blkcnt; i++) {
208 blkiov[i].bi_inode = blkiov15[i].bi_inode;
209 blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
210 blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
211 blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
212 blkiov[i].bi_version = blkiov15[i].bi_version;
213 blkiov[i].bi_bp = blkiov15[i].bi_bp;
214 blkiov[i].bi_size = blkiov15[i].bi_size;
215 }
216
217 if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0) {
218 for (i = 0; i < blkcnt; i++) {
219 blkiov15[i].bi_inode = blkiov[i].bi_inode;
220 blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
221 blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
222 blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
223 blkiov15[i].bi_version = blkiov[i].bi_version;
224 blkiov15[i].bi_bp = blkiov[i].bi_bp;
225 blkiov15[i].bi_size = blkiov[i].bi_size;
226 }
227 copyout(blkiov15, SCARG(uap, blkiov),
228 blkcnt * sizeof(BLOCK_INFO_15));
229 }
230 out:
231 free(blkiov, M_SEGMENT);
232 free(blkiov15, M_SEGMENT);
233 return error;
234 }
235 #endif
236
237 static int
238 lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
239 {
240 BLOCK_INFO *blkp;
241 IFILE *ifp;
242 struct buf *bp, *nbp;
243 struct inode *ip = NULL;
244 struct lfs *fs;
245 struct mount *mntp;
246 struct vnode *vp;
247 #ifdef DEBUG_LFS
248 int vputc = 0, iwritten = 0;
249 #endif
250 ino_t lastino;
251 ufs_daddr_t b_daddr, v_daddr;
252 int cnt, error;
253 int do_again = 0;
254 int s;
255 #ifdef CHECK_COPYIN
256 int i;
257 #endif /* CHECK_COPYIN */
258 int numrefed = 0;
259 ino_t maxino;
260 size_t obsize;
261
262 if ((mntp = vfs_getvfs(fsidp)) == NULL)
263 return (ENOENT);
264
265 fs = VFSTOUFS(mntp)->um_lfs;
266 maxino = (fragstoblks(fs, fsbtofrags(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks)) -
267 fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;
268
269 cnt = blkcnt;
270
271 if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
272 return (error);
273
274 /*
275 * Take the seglock so that, if we have to sleep while marking blocks,
276 * those blocks cannot become invalid (be rewritten or reclaimed) out
277 * from under us.
278 *
279 * It is also important to note here that unless we specify SEGM_CKP,
280 * any Ifile blocks that we might be asked to clean will never get
281 * to the disk.
282 */
283 lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
284
285 /* Mark blocks/inodes dirty. */
286 error = 0;
287
288 #ifdef DEBUG_LFS
289 /* Run through and count the inodes */
290 lastino = LFS_UNUSED_INUM;
291 for (blkp = blkiov; cnt--; ++blkp) {
292 if (lastino != blkp->bi_inode) {
293 lastino = blkp->bi_inode;
294 vputc++;
295 }
296 }
297 cnt = blkcnt;
298 printf("[%d/",vputc);
299 iwritten = 0;
300 #endif /* DEBUG_LFS */
301 /* these were inside the initialization for the for loop */
302 v_daddr = LFS_UNUSED_DADDR;
303 lastino = LFS_UNUSED_INUM;
304 for (blkp = blkiov; cnt--; ++blkp)
305 {
306 if (blkp->bi_daddr == LFS_FORCE_WRITE)
307 printf("lfs_markv: warning: force-writing ino %d lbn %d\n",
308 blkp->bi_inode, blkp->bi_lbn);
309 /* Bounds-check incoming data, avoid panic for failed VGET */
310 if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
311 error = EINVAL;
312 goto again;
313 }
314 /*
315 * Get the IFILE entry (only once) and see if the file still
316 * exists.
317 */
318 if (lastino != blkp->bi_inode) {
319 /*
320 * Finish the old file, if there was one. The presence
321 * of a usable vnode in vp is signaled by a valid v_daddr.
322 */
323 if (v_daddr != LFS_UNUSED_DADDR) {
324 #ifdef DEBUG_LFS
325 if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
326 iwritten++;
327 #endif
328 lfs_vunref(vp);
329 numrefed--;
330 }
331
332 /*
333 * Start a new file
334 */
335 lastino = blkp->bi_inode;
336 if (blkp->bi_inode == LFS_IFILE_INUM)
337 v_daddr = fs->lfs_idaddr;
338 else {
339 LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
340 /* XXX fix for force write */
341 v_daddr = ifp->if_daddr;
342 brelse(bp);
343 }
344 /* Don't force-write the ifile */
345 if (blkp->bi_inode == LFS_IFILE_INUM
346 && blkp->bi_daddr == LFS_FORCE_WRITE)
347 {
348 continue;
349 }
350 if (v_daddr == LFS_UNUSED_DADDR
351 && blkp->bi_daddr != LFS_FORCE_WRITE)
352 {
353 continue;
354 }
355
356 /* Get the vnode/inode. */
357 error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
358 &vp,
359 (blkp->bi_lbn == LFS_UNUSED_LBN
360 ? blkp->bi_bp
361 : NULL));
362
363 if (!error) {
364 numrefed++;
365 }
366 if (error) {
367 #ifdef DEBUG_LFS
368 printf("lfs_markv: lfs_fastvget failed with %d (ino %d, segment %d)\n",
369 error, blkp->bi_inode,
370 dtosn(fs, blkp->bi_daddr));
371 #endif /* DEBUG_LFS */
372 /*
373 * If we got EAGAIN, that means that the
374 * Inode was locked. This is
375 * recoverable: just clean the rest of
376 * this segment, and let the cleaner try
377 * again with another. (When the
378 * cleaner runs again, this segment will
379 * sort high on the list, since it is
380 * now almost entirely empty.) But, we
381 * still set v_daddr = LFS_UNUSED_DADDR
382 * so as not to test this over and over
383 * again.
384 */
385 if (error == EAGAIN) {
386 error = 0;
387 do_again++;
388 }
389 #ifdef DIAGNOSTIC
390 else if (error != ENOENT)
391 panic("lfs_markv VFS_VGET FAILED");
392 #endif
393 /* lastino = LFS_UNUSED_INUM; */
394 v_daddr = LFS_UNUSED_DADDR;
395 vp = NULL;
396 ip = NULL;
397 continue;
398 }
399 ip = VTOI(vp);
400 } else if (v_daddr == LFS_UNUSED_DADDR) {
401 /*
402 * This can only happen if the vnode is dead (or
403 * in any case we can't get it...e.g., it is
404 * inlocked). Keep going.
405 */
406 continue;
407 }
408
409 /* Past this point we are guaranteed that vp, ip are valid. */
410
411 /* If this BLOCK_INFO didn't contain a block, keep going. */
412 if (blkp->bi_lbn == LFS_UNUSED_LBN) {
413 /* XXX need to make sure that the inode gets written in this case */
414 /* XXX but only write the inode if it's the right one */
415 if (blkp->bi_inode != LFS_IFILE_INUM) {
416 LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
417 if (ifp->if_daddr == blkp->bi_daddr
418 || blkp->bi_daddr == LFS_FORCE_WRITE)
419 {
420 LFS_SET_UINO(ip, IN_CLEANING);
421 }
422 brelse(bp);
423 }
424 continue;
425 }
426
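/*
 * Verify that the block still lives where the cleaner thinks it does.
 * If the file has been rewritten since the cleaner chose this block,
 * VOP_BMAP returns a different address and the block is skipped
 * (unless a force-write was requested).
 */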
427 b_daddr = 0;
428 if (blkp->bi_daddr != LFS_FORCE_WRITE) {
429 if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
430 dbtofsb(fs, b_daddr) != blkp->bi_daddr)
431 {
432 if (dtosn(fs,dbtofsb(fs, b_daddr))
433 == dtosn(fs,blkp->bi_daddr))
434 {
435 printf("lfs_markv: wrong da same seg: %x vs %x\n",
436 blkp->bi_daddr, dbtofsb(fs, b_daddr));
437 }
438 do_again++;
439 continue;
440 }
441 }
442
443 /*
444 * Check block sizes. The blocks being cleaned come from
445 * disk, so they should have the same size as their on-disk
446 * counterparts.
447 */
448 if (blkp->bi_lbn >= 0)
449 obsize = blksize(fs, ip, blkp->bi_lbn);
450 else
451 obsize = fs->lfs_bsize;
452 /* Check for fragment size change */
453 if (blkp->bi_lbn >= 0 && blkp->bi_lbn < NDADDR) {
454 obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
455 }
456 if (obsize != blkp->bi_size) {
457 printf("lfs_markv: ino %d lbn %d wrong size (%ld != %d), try again\n",
458 blkp->bi_inode, blkp->bi_lbn,
459 (long) obsize, blkp->bi_size);
460 do_again++;
461 continue;
462 }
463
464 /*
465 * If we get to here, then we are keeping the block. If
466 * it is an indirect block, we want to actually put it
467 * in the buffer cache so that it can be updated in the
468 * finish_meta section. If it's not, we need to
469 * allocate a fake buffer so that writeseg can perform
470 * the copyin and write the buffer.
471 */
472 if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
473 /* Data Block */
474 bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
475 blkp->bi_size, blkp->bi_bp);
476 /* Pretend we used bread() to get it */
477 bp->b_blkno = fsbtodb(fs, blkp->bi_daddr);
478 } else {
479 /* Indirect block or ifile */
480 if (blkp->bi_size != fs->lfs_bsize &&
481 ip->i_number != LFS_IFILE_INUM)
482 panic("lfs_markv: partial indirect block?"
483 " size=%d\n", blkp->bi_size);
484 bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
485 if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
486 /*
487 * The block in question was not found
488 * in the cache; i.e., the block that
489 * getblk() returned is empty. So, we
490 * can (and should) copy in the
491 * contents, because we've already
492 * determined that this was the right
493 * version of this block on disk.
494 *
495 * And, it can't have changed underneath
496 * us, because we have the segment lock.
497 */
498 error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
499 if (error)
500 goto err2;
501 }
502 }
503 if ((error = lfs_bwrite_ext(bp,BW_CLEAN)) != 0)
504 goto err2;
505 }
506
507 /*
508 * Finish the old file, if there was one
509 */
510 if (v_daddr != LFS_UNUSED_DADDR) {
511 #ifdef DEBUG_LFS
512 if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
513 iwritten++;
514 #endif
515 lfs_vunref(vp);
516 numrefed--;
517 }
518
519 #ifdef DEBUG_LFS
520 printf("%d]",iwritten);
521 if (numrefed != 0) {
522 panic("lfs_markv: numrefed=%d", numrefed);
523 }
524 #endif
525
526 /*
527 * The last write has to be SEGM_SYNC, because of calling semantics.
528 * It also has to be SEGM_CKP, because otherwise we could write
529 * over the newly cleaned data contained in a checkpoint, and then
530 * we'd be unhappy at recovery time.
531 */
532 lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
533
534 lfs_segunlock(fs);
535
536 vfs_unbusy(mntp);
537 if (error)
538 return (error);
539 else if (do_again)
540 return EAGAIN;
541
542 return 0;
543
544 err2:
545 printf("lfs_markv err2\n");
546 lfs_vunref(vp);
547 --numrefed;
548
549 /* Free up fake buffers -- have to take these from the LOCKED list */
550 again:
551 s = splbio();
552 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = nbp) {
553 nbp = bp->b_freelist.tqe_next;
554 if (bp->b_flags & B_CALL) {
555 if (bp->b_flags & B_BUSY) { /* not bloody likely */
556 bp->b_flags |= B_WANTED;
557 tsleep(bp, PRIBIO+1, "markv", 0);
558 splx(s);
559 goto again;
560 }
561 if (bp->b_flags & B_DELWRI)
562 fs->lfs_avail += btofsb(fs, bp->b_bcount);
563 bremfree(bp);
564 splx(s);
565 brelse(bp);
566 s = splbio();
567 }
568 }
569 splx(s);
570 lfs_segunlock(fs);
571 vfs_unbusy(mntp);
572 #ifdef DEBUG_LFS
573 if (numrefed != 0) {
574 panic("lfs_markv: numrefed=%d", numrefed);
575 }
576 #endif
577
578 return (error);
579 }
580
581 /*
582 * sys_lfs_bmapv:
583 *
584 * This will fill in the current disk address for arrays of blocks.
585 *
586 * 0 on success
587 * -1/errno is returned on error.
588 */
589 #ifdef USE_64BIT_SYSCALLS
590 int
591 sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
592 {
593 struct sys_lfs_bmapv_args /* {
594 syscallarg(fsid_t *) fsidp;
595 syscallarg(struct block_info *) blkiov;
596 syscallarg(int) blkcnt;
597 } */ *uap = v;
598 BLOCK_INFO *blkiov;
599 int blkcnt, error;
600 fsid_t fsid;
601
602 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
603 return (error);
604
605 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
606 return (error);
607
608 blkcnt = SCARG(uap, blkcnt);
609 if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
610 return (EINVAL);
611 blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
612 if ((error = copyin(SCARG(uap, blkiov), blkiov,
613 blkcnt * sizeof(BLOCK_INFO))) != 0)
614 goto out;
615
616 if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0)
617 copyout(blkiov, SCARG(uap, blkiov),
618 blkcnt * sizeof(BLOCK_INFO));
619 out:
620 free(blkiov, M_SEGMENT);
621 return error;
622 }
623 #else
624 int
625 sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
626 {
627 struct sys_lfs_bmapv_args /* {
628 syscallarg(fsid_t *) fsidp;
629 syscallarg(struct block_info *) blkiov;
630 syscallarg(int) blkcnt;
631 } */ *uap = v;
632 BLOCK_INFO *blkiov;
633 BLOCK_INFO_15 *blkiov15;
634 int i, blkcnt, error;
635 fsid_t fsid;
636
637 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
638 return (error);
639
640 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
641 return (error);
642
643 blkcnt = SCARG(uap, blkcnt);
644 if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
645 return (EINVAL);
646 blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
647 blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
648 if ((error = copyin(SCARG(uap, blkiov), blkiov15,
649 blkcnt * sizeof(BLOCK_INFO_15))) != 0)
650 goto out;
651
652 for (i = 0; i < blkcnt; i++) {
653 blkiov[i].bi_inode = blkiov15[i].bi_inode;
654 blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
655 blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
656 blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
657 blkiov[i].bi_version = blkiov15[i].bi_version;
658 blkiov[i].bi_bp = blkiov15[i].bi_bp;
659 blkiov[i].bi_size = blkiov15[i].bi_size;
660 }
661
662 if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0) {
663 for (i = 0; i < blkcnt; i++) {
664 blkiov15[i].bi_inode = blkiov[i].bi_inode;
665 blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
666 blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
667 blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
668 blkiov15[i].bi_version = blkiov[i].bi_version;
669 blkiov15[i].bi_bp = blkiov[i].bi_bp;
670 blkiov15[i].bi_size = blkiov[i].bi_size;
671 }
672 copyout(blkiov15, SCARG(uap, blkiov),
673 blkcnt * sizeof(BLOCK_INFO_15));
674 }
675 out:
676 free(blkiov, M_SEGMENT);
677 free(blkiov15, M_SEGMENT);
678 return error;
679 }
680 #endif
681
682 static int
683 lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
684 {
685 BLOCK_INFO *blkp;
686 IFILE *ifp;
687 struct buf *bp;
688 struct inode *ip = NULL;
689 struct lfs *fs;
690 struct mount *mntp;
691 struct ufsmount *ump;
692 struct vnode *vp;
693 ino_t lastino;
694 ufs_daddr_t v_daddr;
695 int cnt, error;
696 int numrefed = 0;
697
698 lfs_cleaner_pid = p->p_pid;
699
700 if ((mntp = vfs_getvfs(fsidp)) == NULL)
701 return (ENOENT);
702
703 ump = VFSTOUFS(mntp);
704 if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
705 return (error);
706
707 cnt = blkcnt;
708
709 fs = VFSTOUFS(mntp)->um_lfs;
710
711 error = 0;
712
713 /* these were inside the initialization for the for loop */
714 v_daddr = LFS_UNUSED_DADDR;
715 lastino = LFS_UNUSED_INUM;
716 for (blkp = blkiov; cnt--; ++blkp)
717 {
718 /*
719 * Get the IFILE entry (only once) and see if the file still
720 * exists.
721 */
722 if (lastino != blkp->bi_inode) {
723 /*
724 * Finish the old file, if there was one. The presence
725 * of a usable vnode in vp is signaled by a valid
726 * v_daddr.
727 */
728 if (v_daddr != LFS_UNUSED_DADDR) {
729 lfs_vunref(vp);
730 numrefed--;
731 }
732
733 /*
734 * Start a new file
735 */
736 lastino = blkp->bi_inode;
737 if (blkp->bi_inode == LFS_IFILE_INUM)
738 v_daddr = fs->lfs_idaddr;
739 else {
740 LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
741 v_daddr = ifp->if_daddr;
742 brelse(bp);
743 }
744 if (v_daddr == LFS_UNUSED_DADDR) {
745 blkp->bi_daddr = LFS_UNUSED_DADDR;
746 continue;
747 }
748 /*
749 * A regular call to VFS_VGET could deadlock
750 * here. Instead, we try an unlocked access.
751 */
752 vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
753 if (vp != NULL && !(vp->v_flag & VXLOCK)) {
754 ip = VTOI(vp);
755 if (lfs_vref(vp)) {
756 v_daddr = LFS_UNUSED_DADDR;
757 continue;
758 }
759 numrefed++;
760 } else {
761 error = VFS_VGET(mntp, blkp->bi_inode, &vp);
762 if (error) {
763 #ifdef DEBUG_LFS
764 printf("lfs_bmapv: vget of ino %d failed with %d",blkp->bi_inode,error);
765 #endif
766 v_daddr = LFS_UNUSED_DADDR;
767 continue;
768 } else {
769 KASSERT(VOP_ISLOCKED(vp));
770 VOP_UNLOCK(vp, 0);
771 numrefed++;
772 }
773 }
774 ip = VTOI(vp);
775 } else if (v_daddr == LFS_UNUSED_DADDR) {
776 /*
777 * This can only happen if the vnode is dead.
778 * Keep going. Note that we DO NOT set the
779 * bi_daddr to anything -- if we failed to get
780 * the vnode, for example, we want to assume
781 * conservatively that all of its blocks *are*
782 * located in the segment in question.
783 * lfs_markv will throw them out if we are
784 * wrong.
785 */
786 /* blkp->bi_daddr = LFS_UNUSED_DADDR; */
787 continue;
788 }
789
790 /* Past this point we are guaranteed that vp, ip are valid. */
791
792 if (blkp->bi_lbn == LFS_UNUSED_LBN) {
793 /*
794 * We just want the inode address, which is
795 * conveniently in v_daddr.
796 */
797 blkp->bi_daddr = v_daddr;
798 } else {
799 error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
800 &(blkp->bi_daddr), NULL);
801 if (error)
802 {
803 blkp->bi_daddr = LFS_UNUSED_DADDR;
804 continue;
805 }
806 blkp->bi_daddr = dbtofsb(fs, blkp->bi_daddr);
807 /* Fill in the block size, too */
808 if (blkp->bi_lbn >= 0)
809 blkp->bi_size = blksize(fs, ip, blkp->bi_lbn);
810 else
811 blkp->bi_size = fs->lfs_bsize;
812 }
813 }
814
815 /*
816 * Finish the old file, if there was one. The presence
817 * of a usable vnode in vp is signaled by a valid v_daddr.
818 */
819 if (v_daddr != LFS_UNUSED_DADDR) {
820 lfs_vunref(vp);
821 numrefed--;
822 }
823
824 #ifdef DEBUG_LFS
825 if (numrefed != 0) {
826 panic("lfs_bmapv: numrefed=%d", numrefed);
827 }
828 #endif
829
830 vfs_unbusy(mntp);
831
832 return 0;
833 }
834
835 /*
836 * sys_lfs_segclean:
837 *
838 * Mark the segment clean.
839 *
840 * 0 on success
841 * -1/errno is returned on error.
842 */
843 int
844 sys_lfs_segclean(struct proc *p, void *v, register_t *retval)
845 {
846 struct sys_lfs_segclean_args /* {
847 syscallarg(fsid_t *) fsidp;
848 syscallarg(u_long) segment;
849 } */ *uap = v;
850 CLEANERINFO *cip;
851 SEGUSE *sup;
852 struct buf *bp;
853 struct mount *mntp;
854 struct lfs *fs;
855 fsid_t fsid;
856 int error;
857 unsigned long segnum;
858
859 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
860 return (error);
861
862 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
863 return (error);
864 if ((mntp = vfs_getvfs(&fsid)) == NULL)
865 return (ENOENT);
866
867 fs = VFSTOUFS(mntp)->um_lfs;
868 segnum = SCARG(uap, segment);
869
870 if (dtosn(fs, fs->lfs_curseg) == segnum)
871 return (EBUSY);
872
873 if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
874 return (error);
875 #ifdef LFS_AGGRESSIVE_SEGLOCK
876 lfs_seglock(fs, SEGM_PROT);
877 #endif
878 LFS_SEGENTRY(sup, fs, segnum, bp);
879 if (sup->su_nbytes) {
880 printf("lfs_segclean: not cleaning segment %lu: %d live bytes\n",
881 segnum, sup->su_nbytes);
882 brelse(bp);
883 #ifdef LFS_AGGRESSIVE_SEGLOCK
884 lfs_segunlock(fs);
885 #endif
886 vfs_unbusy(mntp);
887 return (EBUSY);
888 }
889 if (sup->su_flags & SEGUSE_ACTIVE) {
890 brelse(bp);
891 #ifdef LFS_AGGRESSIVE_SEGLOCK
892 lfs_segunlock(fs);
893 #endif
894 vfs_unbusy(mntp);
895 return (EBUSY);
896 }
897 if (!(sup->su_flags & SEGUSE_DIRTY)) {
898 brelse(bp);
899 #ifdef LFS_AGGRESSIVE_SEGLOCK
900 lfs_segunlock(fs);
901 #endif
902 vfs_unbusy(mntp);
903 return (EALREADY);
904 }
905
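/*
 * Return the segment's space to the free count: the whole segment
 * becomes available again, less any space pinned by a superblock or
 * the disk label, and the summary and inode blocks it held are
 * credited back to lfs_bfree and removed from the metadata count.
 */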
906 fs->lfs_avail += segtod(fs, 1);
907 if (sup->su_flags & SEGUSE_SUPERBLOCK)
908 fs->lfs_avail -= btofsb(fs, LFS_SBPAD);
909 if (fs->lfs_version > 1 && segnum == 0 &&
910 fs->lfs_start < btofsb(fs, LFS_LABELPAD))
911 fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
912 fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
913 btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
914 fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
915 btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
916 if (fs->lfs_dmeta < 0)
917 fs->lfs_dmeta = 0;
918 sup->su_flags &= ~SEGUSE_DIRTY;
919 (void) LFS_BWRITE_LOG(bp);
920
921 LFS_CLEANERINFO(cip, fs, bp);
922 ++cip->clean;
923 --cip->dirty;
924 fs->lfs_nclean = cip->clean;
925 cip->bfree = fs->lfs_bfree;
926 cip->avail = fs->lfs_avail - fs->lfs_ravail;
927 (void) LFS_BWRITE_LOG(bp);
928 wakeup(&fs->lfs_avail);
929 #ifdef LFS_AGGRESSIVE_SEGLOCK
930 lfs_segunlock(fs);
931 #endif
932 vfs_unbusy(mntp);
933
934 return (0);
935 }
936
937 /*
938 * sys_lfs_segwait:
939 *
940 * This will block until a segment in file system fsid is written. A timeout
941 * may be specified (as a struct timeval) after which the cleaner is awakened automatically.
942 * An fsid of -1 means any file system, and a timeout of 0 means forever.
943 *
944 * 0 on success
945 * 1 on timeout
946 * -1/errno is returned on error.
947 */
948 int
949 sys_lfs_segwait(struct proc *p, void *v, register_t *retval)
950 {
951 struct sys_lfs_segwait_args /* {
952 syscallarg(fsid_t *) fsidp;
953 syscallarg(struct timeval *) tv;
954 } */ *uap = v;
955 struct mount *mntp;
956 struct timeval atv;
957 fsid_t fsid;
958 void *addr;
959 u_long timeout;
960 int error, s;
961
962 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
963 return (error);
964 }
965 if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
966 return (error);
967 if ((mntp = vfs_getvfs(&fsid)) == NULL)
968 addr = &lfs_allclean_wakeup;
969 else
970 addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
971
972 if (SCARG(uap, tv)) {
973 error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
974 if (error)
975 return (error);
976 if (itimerfix(&atv))
977 return (EINVAL);
978 /*
979 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
980 * XXX IS THAT WHAT IS INTENDED?
981 */
982 s = splclock();
983 timeradd(&atv, &time, &atv);
984 timeout = hzto(&atv);
985 splx(s);
986 } else
987 timeout = 0;
988
989 error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
990 return (error == ERESTART ? EINTR : 0);
991 }
992
993 /*
994 * VFS_VGET call specialized for the cleaner. The cleaner already knows the
995 * daddr from the ifile, so don't look it up again. If the cleaner is
996 * processing IINFO structures, it may have the ondisk inode already, so
997 * don't go retrieving it again.
998 *
999 * We lfs_vref the vnode here, and it is the caller's responsibility to
1000 * lfs_vunref it when finished.
1001 */
1002 extern struct lock ufs_hashlock;
1003
1004 int
1005 lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
1006 {
1007 struct inode *ip;
1008
1009 /*
1010 * This is playing fast and loose. Someone may have the inode
1011 * locked, in which case they are going to be distinctly unhappy
1012 * if we trash something.
1013 */
1014 if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
1015 if ((*vpp)->v_flag & VXLOCK) {
1016 printf("lfs_fastvget: vnode VXLOCKed for ino %d\n",
1017 ino);
1018 clean_vnlocked++;
1019 #ifdef LFS_EAGAIN_FAIL
1020 return EAGAIN;
1021 #endif
1022 }
1023 ip = VTOI(*vpp);
1024 if (lfs_vref(*vpp)) {
1025 clean_inlocked++;
1026 return EAGAIN;
1027 }
1028 } else
1029 *vpp = NULL;
1030
1031 return (0);
1032 }
1033
1034 int
1035 lfs_fastvget(struct mount *mp, ino_t ino, ufs_daddr_t daddr, struct vnode **vpp, struct dinode *dinp)
1036 {
1037 struct inode *ip;
1038 struct dinode *dip;
1039 struct vnode *vp;
1040 struct ufsmount *ump;
1041 dev_t dev;
1042 int i, error, retries;
1043 struct buf *bp;
1044 struct lfs *fs;
1045
1046 ump = VFSTOUFS(mp);
1047 dev = ump->um_dev;
1048 fs = ump->um_lfs;
1049
1050 /*
1051 * Wait until the filesystem is fully mounted before allowing vget
1052 * to complete. This prevents possible problems with roll-forward.
1053 */
1054 while (fs->lfs_flags & LFS_NOTYET) {
1055 tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
1056 }
1057 /*
1058 * This is playing fast and loose. Someone may have the inode
1059 * locked, in which case they are going to be distinctly unhappy
1060 * if we trash something.
1061 */
1062
1063 error = lfs_fasthashget(dev, ino, vpp);
1064 if (error != 0 || *vpp != NULL)
1065 return (error);
1066
1067 if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
1068 *vpp = NULL;
1069 return (error);
1070 }
1071
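/*
 * Take the inode hash lock, re-checking the hash each time we may have
 * slept: another thread may have brought this inode in while we were
 * allocating the vnode, in which case we give the new vnode back.
 */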
1072 do {
1073 error = lfs_fasthashget(dev, ino, vpp);
1074 if (error != 0 || *vpp != NULL) {
1075 ungetnewvnode(vp);
1076 return (error);
1077 }
1078 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1079
1080 /* Allocate new vnode/inode. */
1081 lfs_vcreate(mp, ino, vp);
1082
1083 /*
1084 * Put it onto its hash chain and lock it so that other requests for
1085 * this inode will block if they arrive while we are sleeping waiting
1086 * for old data structures to be purged or for the contents of the
1087 * disk portion of this inode to be read.
1088 */
1089 ip = VTOI(vp);
1090 ufs_ihashins(ip);
1091 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1092
1093 /*
1094 * XXX
1095 * This may not need to be here, logically it should go down with
1096 * the i_devvp initialization.
1097 * Ask Kirk.
1098 */
1099 ip->i_lfs = fs;
1100
1101 /* Read in the disk contents for the inode, copy into the inode. */
1102 if (dinp) {
1103 error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
1104 if (error) {
1105 printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
1106 ufs_ihashrem(ip);
1107
1108 /* Unlock and discard unneeded inode. */
1109 lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
1110 lfs_vunref(vp);
1111 *vpp = NULL;
1112 return (error);
1113 }
1114 if (ip->i_number != ino)
1115 panic("lfs_fastvget: I was fed the wrong inode!");
1116 } else {
1117 retries = 0;
1118 again:
1119 error = bread(ump->um_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
1120 NOCRED, &bp);
1121 if (error) {
1122 printf("lfs_fastvget: bread failed with %d\n",error);
1123 /*
1124 * The inode does not contain anything useful, so it
1125 * would be misleading to leave it on its hash chain.
1126 * Iput() will return it to the free list.
1127 */
1128 ufs_ihashrem(ip);
1129
1130 /* Unlock and discard unneeded inode. */
1131 lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
1132 lfs_vunref(vp);
1133 brelse(bp);
1134 *vpp = NULL;
1135 return (error);
1136 }
1137 dip = lfs_ifind(ump->um_lfs, ino, bp);
1138 if (dip == NULL) {
1139 /* Assume write has not completed yet; try again */
1140 bp->b_flags |= B_INVAL;
1141 brelse(bp);
1142 ++retries;
1143 if (retries > LFS_IFIND_RETRIES)
1144 panic("lfs_fastvget: dinode not found");
1145 printf("lfs_fastvget: dinode not found, retrying...\n");
1146 goto again;
1147 }
1148 ip->i_din.ffs_din = *dip;
1149 brelse(bp);
1150 }
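/*
 * Set up the in-core bookkeeping fields from the on-disk inode:
 * effective link and block counts, the last on-disk size, and the
 * size of each direct block (used for fragment accounting).
 */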
1151 ip->i_ffs_effnlink = ip->i_ffs_nlink;
1152 ip->i_lfs_effnblks = ip->i_ffs_blocks;
1153 ip->i_lfs_osize = ip->i_ffs_size;
1154
1155 memset(ip->i_lfs_fragsize, 0, NDADDR * sizeof(*ip->i_lfs_fragsize));
1156 for (i = 0; i < NDADDR; i++)
1157 if (ip->i_ffs_db[i] != 0)
1158 ip->i_lfs_fragsize[i] = blksize(fs, ip, i);
1159
1160 /*
1161 * Initialize the vnode from the inode, check for aliases. In all
1162 * cases re-init ip, the underlying vnode/inode may have changed.
1163 */
1164 ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
1165 #ifdef DEBUG_LFS
1166 if (vp->v_type == VNON) {
1167 printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
1168 ip->i_number, (ip->i_ffs_mode & IFMT) >> 12, dinp);
1169 lfs_dump_dinode(&ip->i_din.ffs_din);
1170 #ifdef DDB
1171 Debugger();
1172 #endif
1173 }
1174 #endif /* DEBUG_LFS */
1175 /*
1176 * Finish inode initialization now that aliasing has been resolved.
1177 */
1178
1179 genfs_node_init(vp, &lfs_genfsops);
1180 ip->i_devvp = ump->um_devvp;
1181 VREF(ip->i_devvp);
1182 *vpp = vp;
1183 KASSERT(VOP_ISLOCKED(vp));
1184 VOP_UNLOCK(vp, 0);
1185
1186 uvm_vnp_setsize(vp, ip->i_ffs_size);
1187
1188 return (0);
1189 }
1190
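/*
 * I/O-done handler for fake buffers: release the shadow buffer that
 * lfs_fakebuf obtained with getblk() (invalidating it if it was never
 * made valid), then run the usual cleaner callback for the fake buffer.
 */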
1191 static void
1192 lfs_fakebuf_iodone(struct buf *bp)
1193 {
1194 struct buf *obp = bp->b_saveaddr;
1195
1196 if (!(obp->b_flags & (B_DELWRI | B_DONE)))
1197 obp->b_flags |= B_INVAL;
1198 brelse(obp);
1199 lfs_callback(bp);
1200 }
1201
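/*
 * Build a "fake" buffer holding a copy of cleaner-supplied block
 * contents (copied in from the userland address uaddr in the default,
 * non-ALLOW_VFLUSH_CORRUPTION case), shadowing the corresponding cache
 * buffer so the block can't be read back before the new copy is written.
 */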
1202 struct buf *
1203 lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, caddr_t uaddr)
1204 {
1205 struct buf *bp;
1206 int error;
1207
1208 struct buf *obp;
1209
1210 KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);
1211
1212 /*
1213 * Make the corresponding buffer busy to avoid reading blocks
1214 * that haven't been written yet.  This is needed because we
1215 * update metadata in lfs_updatemeta before the data it points
1216 * to is actually written to disk.
1217 * XXX no need to allocbuf.
1218 */
1219 obp = getblk(vp, lbn, size, 0, 0);
1220 if (obp == NULL)
1221 panic("lfs_fakebuf: getblk failed");
1222
1223 #ifndef ALLOW_VFLUSH_CORRUPTION
1224 bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size);
1225 error = copyin(uaddr, bp->b_data, size);
1226 if (error) {
1227 lfs_freebuf(bp);
1228 return NULL;
1229 }
1230 bp->b_saveaddr = obp;
1231 KDASSERT(bp->b_iodone == lfs_callback);
1232 bp->b_iodone = lfs_fakebuf_iodone;
1233
1234 #ifdef DIAGNOSTIC
1235 if (obp->b_flags & B_GATHERED)
1236 panic("lfs_fakebuf: gathered bp: %p, ino=%u, lbn=%d",
1237 bp, VTOI(vp)->i_number, lbn);
1238 #endif
1239 #else
1240 bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, 0);
1241 bp->b_flags |= B_INVAL;
1242 bp->b_saveaddr = uaddr;
1243 #endif
1244 #if 0
1245 bp->b_saveaddr = (caddr_t)fs;
1246 ++fs->lfs_iocount;
1247 #endif
1248 bp->b_bufsize = size;
1249 bp->b_bcount = size;
1250 return (bp);
1251 }
1252