/*	$NetBSD: lfs_syscalls.c,v 1.74 2002/12/17 14:37:49 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.74 2002/12/17 14:37:49 yamt Exp $");

#define LFS		/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Max block count for lfs_markv() */
#define MARKV_MAXBLKCNT		65536

struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, caddr_t);
int lfs_fasthashget(dev_t, ino_t, struct vnode **);

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
int verbose_debug = 0;

pid_t lfs_cleaner_pid = 0;

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define LFS_FORCE_WRITE UNASSIGNED

#define LFS_VREF_THRESHOLD 128

static int lfs_bmapv(struct proc *, fsid_t *, BLOCK_INFO *, int);
static int lfs_markv(struct proc *, fsid_t *, BLOCK_INFO *, int);
static void lfs_fakebuf_iodone(struct buf *);

/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 *  0 on success
 * -1/errno is returned on error.
 */
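/*
 * A hypothetical userland sketch of how a cleaner might drive this call
 * (error handling abbreviated; the wrapper name and the way bi[] gets
 * filled in are assumptions, not something this file defines):
 *
 *	fsid_t fsid;		// e.g. from statfs() on the LFS mount
 *	BLOCK_INFO bi[NBLK];	// candidate blocks from segment summaries,
 *				// previously verified with lfs_bmapv()
 *
 *	if (lfs_markv(&fsid, bi, NBLK) < 0)
 *		err(1, "lfs_markv");	// EAGAIN: retry with another segment
 */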
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_markv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > MARKV_MAXBLKCNT)
		return (EINVAL);

	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, *nbp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
#ifdef DEBUG_LFS
	int vputc = 0, iwritten = 0;
#endif
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	int cnt, error;
	int do_again = 0;
	int s;
#ifdef CHECK_COPYIN
	int i;
#endif /* CHECK_COPYIN */
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	maxino = (fragstoblks(fs, fsbtofrags(fs, VTOI(fs->lfs_ivnode)->i_ffs_blocks)) -
		  fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock, so that even if we have to sleep below, our
	 * blocks cannot become invalid in the meantime.
	 *
	 * Note also that unless we specify SEGM_CKP, any Ifile blocks
	 * that we might be asked to clean will never get to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

#ifdef DEBUG_LFS
	/* Run through and count the inodes */
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp) {
		if (lastino != blkp->bi_inode) {
			lastino = blkp->bi_inode;
			vputc++;
		}
	}
	cnt = blkcnt;
	printf("[%d/", vputc);
	iwritten = 0;
#endif /* DEBUG_LFS */
	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		if (blkp->bi_daddr == LFS_FORCE_WRITE)
			printf("lfs_markv: warning: force-writing ino %d lbn %d\n",
			       blkp->bi_inode, blkp->bi_lbn);
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto again;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
				if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
					iwritten++;
#endif
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			/* Don't force-write the ifile */
			if (blkp->bi_inode == LFS_IFILE_INUM
			    && blkp->bi_daddr == LFS_FORCE_WRITE)
			{
				continue;
			}
			if (v_daddr == LFS_UNUSED_DADDR
			    && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				continue;
			}

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					     &vp,
					     (blkp->bi_lbn == LFS_UNUSED_LBN
					      ? blkp->bi_bp
					      : NULL));

			if (!error) {
				numrefed++;
			}
			if (error) {
#ifdef DEBUG_LFS
				printf("lfs_markv: lfs_fastvget failed with %d (ino %d, segment %d)\n",
				       error, blkp->bi_inode,
				       dtosn(fs, blkp->bi_daddr));
#endif /* DEBUG_LFS */
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)  But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr
				    || blkp->bi_daddr == LFS_FORCE_WRITE)
				{
					LFS_SET_UINO(ip, IN_CLEANING);
				}
				brelse(bp);
			}
			continue;
		}

		b_daddr = 0;
		if (blkp->bi_daddr != LFS_FORCE_WRITE) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
			    dbtofsb(fs, b_daddr) != blkp->bi_daddr)
			{
				if (dtosn(fs, dbtofsb(fs, b_daddr))
				    == dtosn(fs, blkp->bi_daddr))
				{
					printf("lfs_markv: wrong da same seg: %x vs %x\n",
					       blkp->bi_daddr, dbtofsb(fs, b_daddr));
				}
				do_again++;
				continue;
			}
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = fs->lfs_bsize;
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			printf("lfs_markv: ino %d lbn %d wrong size (%ld != %d), try again\n",
			       blkp->bi_inode, blkp->bi_lbn,
			       (long) obsize, blkp->bi_size);
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = fsbtodb(fs, blkp->bi_daddr);
		} else {
			/* Indirect block */
			if (blkp->bi_size != fs->lfs_bsize)
				panic("lfs_markv: partial indirect block?"
				      " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
		if (ip->i_flag & (IN_MODIFIED|IN_CLEANING))
			iwritten++;
#endif
		lfs_vunref(vp);
		numrefed--;
	}

#ifdef DEBUG_LFS
	printf("%d]", iwritten);
	if (numrefed != 0) {
		panic("lfs_markv: numrefed=%d", numrefed);
	}
#endif

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

err2:
	printf("lfs_markv err2\n");
	lfs_vunref(vp);
	--numrefed;

	/* Free up fakebuffers -- have to take these from the LOCKED list */
 again:
	s = splbio();
	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = nbp) {
		nbp = bp->b_freelist.tqe_next;
		if (bp->b_flags & B_CALL) {
			if (bp->b_flags & B_BUSY) { /* not bloody likely */
				bp->b_flags |= B_WANTED;
				tsleep(bp, PRIBIO+1, "markv", 0);
				splx(s);
				goto again;
			}
			if (bp->b_flags & B_DELWRI)
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
			bremfree(bp);
			splx(s);
			brelse(bp);
			s = splbio();
		}
	}
	splx(s);
	lfs_segunlock(fs);
	vfs_unbusy(mntp);
#ifdef DEBUG_LFS
	if (numrefed != 0) {
		panic("lfs_markv: numrefed=%d", numrefed);
	}
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
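/*
 * A hypothetical sketch of the bmapv/markv handshake as seen from
 * userland (wrapper names as in the sketch above lfs_markv(); error
 * handling abbreviated):
 *
 *	// 1. Fill bi[] with candidate blocks from a segment summary.
 *	// 2. Ask the kernel where each block currently lives:
 *	if (lfs_bmapv(&fsid, bi, nblk) < 0)
 *		err(1, "lfs_bmapv");
 *	// 3. Entries whose bi_daddr still points into the segment being
 *	//    cleaned are live; those are handed on to lfs_markv().
 */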
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	free(blkiov, M_SEGMENT);
	return error;
}
#else
int
sys_lfs_bmapv(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	blkiov15 = malloc(blkcnt * sizeof(BLOCK_INFO_15), M_SEGMENT, M_WAITOK);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(p, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	free(blkiov, M_SEGMENT);
	free(blkiov15, M_SEGMENT);
	return error;
}
#endif

static int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	ufs_daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOUFS(mntp);
	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOUFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_flag & VXLOCK)) {
				ip = VTOI(vp);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				numrefed++;
			} else {
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
#ifdef DEBUG_LFS
					printf("lfs_bmapv: vget of ino %d failed with %d", blkp->bi_inode, error);
#endif
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				} else {
					KASSERT(VOP_ISLOCKED(vp));
					VOP_UNLOCK(vp, 0);
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.  Note that we DO NOT set the
			 * bi_daddr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &(blkp->bi_daddr), NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = dbtofsb(fs, blkp->bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		numrefed--;
	}

#ifdef DEBUG_LFS
	if (numrefed != 0) {
		panic("lfs_bmapv: numrefed=%d", numrefed);
	}
#endif

	vfs_unbusy(mntp);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
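/*
 * Hypothetical usage: once lfs_markv() has rewritten every live block
 * out of a segment, the cleaner marks that segment clean (sketch;
 * wrapper name assumed, error handling abbreviated):
 *
 *	if (lfs_segclean(&fsid, segnum) < 0)
 *		warn("segment %lu not cleanable yet", segnum); // e.g. EBUSY
 */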
int
sys_lfs_segclean(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOUFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if (dtosn(fs, fs->lfs_curseg) == segnum)
		return (EBUSY);

	if ((error = vfs_busy(mntp, LK_NOWAIT, NULL)) != 0)
		return (error);
#ifdef LFS_AGGRESSIVE_SEGLOCK
	lfs_seglock(fs, SEGM_PROT);
#endif
	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		printf("lfs_segclean: not cleaning segment %lu: %d live bytes\n",
		       segnum, sup->su_nbytes);
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		brelse(bp);
#ifdef LFS_AGGRESSIVE_SEGLOCK
		lfs_segunlock(fs);
#endif
		vfs_unbusy(mntp);
		return (EALREADY);
	}

	fs->lfs_avail += segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && segnum == 0 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
	fs->lfs_bfree += sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * btofsb(fs, fs->lfs_sumsize) +
		btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) LFS_BWRITE_LOG(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	cip->avail = fs->lfs_avail - fs->lfs_ravail;
	(void) LFS_BWRITE_LOG(bp);
	wakeup(&fs->lfs_avail);
#ifdef LFS_AGGRESSIVE_SEGLOCK
	lfs_segunlock(fs);
#endif
	vfs_unbusy(mntp);

	return (0);
}

/*
 * sys_lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified, which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
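/*
 * Hypothetical cleaner main-loop sketch (wrapper name assumed; note
 * that a {0,0} timeout blocks forever, per the XXX comment below):
 *
 *	struct timeval tv = { 600, 0 };	// wake up at least every 10 min
 *
 *	for (;;) {
 *		clean_some_segments();		// hypothetical helper
 *		if (lfs_segwait(&fsid, &tv) < 0)
 *			err(1, "lfs_segwait");
 *	}
 */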
int
sys_lfs_segwait(struct proc *p, void *v, register_t *retval)
{
	struct sys_lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		/*
		 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
		 * XXX IS THAT WHAT IS INTENDED?
		 */
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * We lfs_vref here, and it is the caller's responsibility to lfs_vunref
 * when finished.
 */
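/*
 * A minimal in-kernel sketch of the convention described above (see
 * lfs_markv() for a real caller; error handling abbreviated):
 *
 *	struct vnode *vp;
 *
 *	if (lfs_fastvget(mntp, ino, v_daddr, &vp, NULL) == 0) {
 *		...use vp, VTOI(vp)...
 *		lfs_vunref(vp);		// drop the reference we were given
 *	}
 */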
extern struct lock ufs_hashlock;

int
lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
{
	struct inode *ip;

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
		if ((*vpp)->v_flag & VXLOCK) {
			printf("lfs_fastvget: vnode VXLOCKed for ino %d\n",
			       ino);
			clean_vnlocked++;
#ifdef LFS_EAGAIN_FAIL
			return EAGAIN;
#endif
		}
		ip = VTOI(*vpp);
		if (lfs_vref(*vpp)) {
			clean_inlocked++;
			return EAGAIN;
		}
	} else
		*vpp = NULL;

	return (0);
}

int
lfs_fastvget(struct mount *mp, ino_t ino, ufs_daddr_t daddr, struct vnode **vpp, struct dinode *dinp)
{
	struct inode *ip;
	struct dinode *dip;
	struct vnode *vp;
	struct ufsmount *ump;
	dev_t dev;
	int i, error, retries;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.  This prevents possible problems with roll-forward.
	 */
	while (fs->lfs_flags & LFS_NOTYET) {
		tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0);
	}
	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	do {
		error = lfs_fasthashget(dev, ino, vpp);
		if (error != 0 || *vpp != NULL) {
			ungetnewvnode(vp);
			return (error);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
		if (error) {
			printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		retries = 0;
	    again:
		error = bread(ump->um_devvp, fsbtodb(fs, daddr), fs->lfs_ibsize,
			      NOCRED, &bp);
		if (error) {
			printf("lfs_fastvget: bread failed with %d\n", error);
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lockmgr(&vp->v_lock, LK_RELEASE, &vp->v_interlock);
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		dip = lfs_ifind(ump->um_lfs, ino, bp);
		if (dip == NULL) {
			/* Assume write has not completed yet; try again */
			bp->b_flags |= B_INVAL;
			brelse(bp);
			++retries;
			if (retries > LFS_IFIND_RETRIES)
				panic("lfs_fastvget: dinode not found");
			printf("lfs_fastvget: dinode not found, retrying...\n");
			goto again;
		}
		ip->i_din.ffs_din = *dip;
		brelse(bp);
	}
	ip->i_ffs_effnlink = ip->i_ffs_nlink;
	ip->i_lfs_effnblks = ip->i_ffs_blocks;
	ip->i_lfs_osize = ip->i_ffs_size;

	memset(ip->i_lfs_fragsize, 0, NDADDR * sizeof(*ip->i_lfs_fragsize));
	for (i = 0; i < NDADDR; i++)
		if (ip->i_ffs_db[i] != 0)
			ip->i_lfs_fragsize[i] = blksize(fs, ip, i);

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
#ifdef DEBUG_LFS
	if (vp->v_type == VNON) {
		printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
		       ip->i_number, (ip->i_ffs_mode & IFMT) >> 12, dinp);
		lfs_dump_dinode(&ip->i_din.ffs_din);
#ifdef DDB
		Debugger();
#endif
	}
#endif /* DEBUG_LFS */
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	genfs_node_init(vp, &lfs_genfsops);
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	KASSERT(VOP_ISLOCKED(vp));
	VOP_UNLOCK(vp, 0);

	uvm_vnp_setsize(vp, ip->i_ffs_size);

	return (0);
}

static void
lfs_fakebuf_iodone(struct buf *bp)
{
	struct buf *obp = bp->b_saveaddr;

	if (!(obp->b_flags & (B_DELWRI | B_DONE)))
		obp->b_flags |= B_INVAL;
	brelse(obp);
	lfs_callback(bp);
}

struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, caddr_t uaddr)
{
	struct buf *bp;
	int error;

	struct buf *obp;

	/*
	 * Make the corresponding buffer busy to avoid reading blocks
	 * that aren't written yet.  This is needed because we'll update
	 * metadata in lfs_updatemeta before the data it points to is
	 * actually written to disk.
	 * XXX no need to allocbuf.
	 */
	obp = getblk(vp, lbn, size, 0, 0);
	if (obp == NULL)
		panic("lfs_fakebuf: getblk failed");

#ifndef ALLOW_VFLUSH_CORRUPTION
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(bp);
		return NULL;
	}
	bp->b_saveaddr = obp;
	KDASSERT(bp->b_iodone == lfs_callback);
	bp->b_iodone = lfs_fakebuf_iodone;

#ifdef DIAGNOSTIC
	if (obp->b_flags & B_GATHERED)
		panic("lfs_fakebuf: gathered bp: %p, ino=%u, lbn=%d",
		      bp, VTOI(vp)->i_number, lbn);
#endif
#else
	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, 0);
	bp->b_flags |= B_INVAL;
	bp->b_saveaddr = uaddr;
#endif
#if 0
	bp->b_saveaddr = (caddr_t)fs;
	++fs->lfs_iocount;
#endif
	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}