/*	$NetBSD: lfs_syscalls.c,v 1.24 1999/03/25 21:54:10 perseant Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include "fs_lfs.h"		/* for prototypes in syscallargs.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/syscallargs.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Flags for return from lfs_fastvget */
#define FVG_UNLOCK	0x01	/* Needs to be unlocked */
#define FVG_PUT		0x02	/* Needs to be vput() */

struct buf *lfs_fakebuf __P((struct vnode *, int, size_t, caddr_t));

int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
int verbose_debug = 0;
int lfs_clean_vnhead = 1;

pid_t lfs_cleaner_pid = 0;

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define LFS_FORCE_WRITE UNASSIGNED

#define LFS_VREF_THRESHOLD 128

/*
 * lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 * 0 on success
 * -1/errno is returned on error.
 */
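/*
 * Usage sketch (an illustration, not part of this file): a userland
 * cleaner reaches this routine through the lfs_markv(2) system call,
 * whose argument structure is shown above.  Here "fsid", "blkiov" and
 * "blkcnt" stand for values the cleaner has already gathered from the
 * ifile and segment summaries; on EAGAIN it moves on to another segment.
 *
 *	extern int lfs_markv(fsid_t *, struct block_info *, int);
 *
 *	if (lfs_markv(&fsid, blkiov, blkcnt) < 0) {
 *		if (errno == EAGAIN)
 *			(segment busy -- retry with another segment)
 *		else
 *			err(1, "lfs_markv");
 *	}
 */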
int
lfs_markv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_markv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp, *nbp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
#ifdef DEBUG_LFS
	int vputc=0, iwritten=0;
#endif
	fsid_t fsid;
	void *start;
	ino_t lastino;
	ufs_daddr_t b_daddr, v_daddr;
	int origcnt, cnt, error, lfs_fastvget_unlock;
	int do_again=0;
	int s;
#ifdef CHECK_COPYIN
	int i;
#endif /* CHECK_COPYIN */
#ifdef LFS_TRACK_IOS
	int j;
#endif
	int numlocked=0, numrefed=0;

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	fs = VFSTOUFS(mntp)->um_lfs;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	origcnt = cnt = SCARG(uap, blkcnt);
	start = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	error = copyin(SCARG(uap, blkiov), start, cnt * sizeof(BLOCK_INFO));
	if (error)
		goto err1;

	/*
	 * This seglock is just to prevent the fact that we might have to sleep
	 * from allowing the possibility that our blocks might become
	 * invalid.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);

	/* Mark blocks/inodes dirty.  */
	error = 0;

#ifdef DEBUG_LFS
	/* Run through and count the inodes */
	lastino = LFS_UNUSED_INUM;
	for(blkp = start; cnt--; ++blkp) {
		if(lastino != blkp->bi_inode) {
			lastino = blkp->bi_inode;
			vputc++;
		}
	}
	cnt = origcnt;
	printf("[%d/",vputc);
	iwritten=0;
#endif /* DEBUG_LFS */
	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = start; cnt--; ++blkp)
	{
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for(j=0;j<LFS_THROTTLE;j++) {
			if(fs->lfs_pending[j] != LFS_UNUSED_DADDR
			   && datosn(fs,fs->lfs_pending[j])==datosn(fs,blkp->bi_daddr)
			   && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				printf("lfs_markv: attempt to clean pending segment? (#%d)\n",
				       datosn(fs, fs->lfs_pending[j]));
				/* free(start,M_SEGMENT); */
				/* return (EBUSY); */
			}
		}
#endif /* LFS_TRACK_IOS */
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if(v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
				if(ip->i_flag & (IN_MODIFIED|IN_CLEANING))
					iwritten++;
#endif
				if(lfs_fastvget_unlock) {
					VOP_UNLOCK(vp,0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			/* Don't force-write the ifile */
			if (blkp->bi_inode == LFS_IFILE_INUM
			    && blkp->bi_daddr == LFS_FORCE_WRITE)
			{
				continue;
			}
			if (v_daddr == LFS_UNUSED_DADDR
			    && blkp->bi_daddr != LFS_FORCE_WRITE)
			{
				continue;
			}

			/* Get the vnode/inode.  */
			error=lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					   &vp,
					   (blkp->bi_lbn==LFS_UNUSED_LBN
					    ? blkp->bi_bp
					    : NULL),
					   &lfs_fastvget_unlock);
			if(lfs_fastvget_unlock)
				numlocked++;

			if(!error) {
				numrefed++;
			}
			if(error) {
#ifdef DIAGNOSTIC
				printf("lfs_markv: VFS_VGET failed with %d (ino %d, segment %d)\n",
				       error, blkp->bi_inode,
				       datosn(fs, blkp->bi_daddr));
#endif /* DIAGNOSTIC */
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)  But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if(error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if(error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * inlocked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if(ifp->if_daddr == blkp->bi_daddr
				   || blkp->bi_daddr == LFS_FORCE_WRITE)
				{
					if(!(ip->i_flag & IN_CLEANING))
						fs->lfs_uinodes++;
					ip->i_flag |= IN_CLEANING;
				}
				brelse(bp);
			}
			continue;
		}

		b_daddr = 0;
		if(blkp->bi_daddr != LFS_FORCE_WRITE) {
			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
			    b_daddr != blkp->bi_daddr)
			{
				if(datosn(fs,b_daddr)
				   == datosn(fs,blkp->bi_daddr))
				{
					printf("Wrong da same seg: %x vs %x\n",
					       blkp->bi_daddr, b_daddr);
				}
				continue;
			}
		}
		/*
		 * If we got to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		/*
		 * XXX - if the block we are reading has been *extended* since
		 * it was written to disk, then we risk throwing away
		 * the extension in bread()/getblk().  Check the size
		 * here.
		 */
		if(blkp->bi_size < fs->lfs_bsize) {
			s = splbio();
			bp = incore(vp, blkp->bi_lbn);
			if(bp && bp->b_bcount > blkp->bi_size) {
				printf("lfs_markv: %ld > %d (fixed)\n",
				       bp->b_bcount, blkp->bi_size);
				blkp->bi_size = bp->b_bcount;
			}
			splx(s);
		}
		if (blkp->bi_lbn >= 0) {	/* Data Block */
			/* XXX KS - should we use incore here, or just always use getblk()? */
			bp = lfs_fakebuf(vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = blkp->bi_daddr;
		} else {	/* Indirect block */
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_flags & (B_DONE|B_DELWRI))) { /* B_CACHE */
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if(error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp,BW_CLEAN)) != 0)
			goto err2;
	}

	/*
	 * Finish the old file, if there was one
	 */
	if(v_daddr != LFS_UNUSED_DADDR) {
#ifdef DEBUG_LFS
		if(ip->i_flag & (IN_MODIFIED|IN_CLEANING))
			iwritten++;
#endif
		if(lfs_fastvget_unlock) {
			VOP_UNLOCK(vp,0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_SYNC|SEGM_CLEAN|SEGM_CKP);
	free(start, M_SEGMENT);

	lfs_segunlock(fs);

#ifdef DEBUG_LFS
	printf("%d]",iwritten);
	if(numlocked != 0 || numrefed != 0) {
		panic("lfs_markv: numlocked=%d numrefed=%d", numlocked, numrefed);
	}
#endif

	if(error)
		return (error);
	else if(do_again)
		return EAGAIN;

	return 0;

 err2:
	printf("markv err2\n");
	lfs_vunref(vp);
	/* Free up fakebuffers -- have to take these from the LOCKED list */
 again:
	for(bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp=nbp) {
		nbp = bp->b_freelist.tqe_next;
		if(bp->b_flags & B_CALL) {
			s = splbio();
			if(bp->b_flags & B_BUSY) { /* not bloody likely */
				bp->b_flags |= B_WANTED;
				tsleep(bp, PRIBIO+1, "markv", 0);
				splx(s);
				goto again;
			}
			bremfree(bp);
			splx(s);
			brelse(bp);
		}
	}
	free(start, M_SEGMENT);
	lfs_segunlock(fs);
	vfs_unbusy(mntp);
	return (error);

 err1:
	printf("markv err1\n");
	free(start, M_SEGMENT);
	return (error);
}

/*
 * lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
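/*
 * Usage sketch (an illustration, not part of this file): the cleaner
 * fills in bi_inode and bi_lbn for each block it found in a segment,
 * calls lfs_bmapv(2), and treats a block as live only if the returned
 * bi_daddr still lies in that segment.  "fs" and "segnum" below are
 * placeholders for state the cleaner keeps on its own.
 *
 *	extern int lfs_bmapv(fsid_t *, struct block_info *, int);
 *
 *	if (lfs_bmapv(&fsid, blkiov, blkcnt) < 0)
 *		err(1, "lfs_bmapv");
 *	for (i = 0; i < blkcnt; i++)
 *		if (datosn(fs, blkiov[i].bi_daddr) == segnum)
 *			(block is live: hand it to lfs_markv)
 */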

int
lfs_bmapv(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_bmapv_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */ *uap = v;
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ufsmount *ump;
	struct vnode *vp;
	fsid_t fsid;
	void *start;
	ino_t lastino;
	ufs_daddr_t v_daddr;
	int origcnt, cnt, error, need_unlock=0;
	int numlocked=0, numrefed=0;
#ifdef LFS_TRACK_IOS
	int j;
#endif

	lfs_cleaner_pid = p->p_pid;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	ump = VFSTOUFS(mntp);

	origcnt = cnt = SCARG(uap, blkcnt);
	start = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
	error = copyin(SCARG(uap, blkiov), start, cnt * sizeof(BLOCK_INFO));
	if (error) {
		free(start, M_SEGMENT);
		return (error);
	}

	fs = VFSTOUFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = start; cnt--; ++blkp)
	{
#ifdef DEBUG
		if (datosn(fs, fs->lfs_curseg) == datosn(fs, blkp->bi_daddr)) {
			printf("Hm, attempt to clean current segment? (#%d)\n",
			       datosn(fs, fs->lfs_curseg));
			free(start,M_SEGMENT);
			return (EBUSY);
		}
#endif /* DEBUG */
#ifdef LFS_TRACK_IOS
		/*
		 * If there is I/O on this segment that is not yet complete,
		 * the cleaner probably does not have the right information.
		 * Send it packing.
		 */
		for(j=0;j<LFS_THROTTLE;j++) {
			if(fs->lfs_pending[j] != LFS_UNUSED_DADDR
			   && datosn(fs,fs->lfs_pending[j])==datosn(fs,blkp->bi_daddr))
			{
				printf("lfs_bmapv: attempt to clean pending segment? (#%d)\n",
				       datosn(fs, fs->lfs_pending[j]));
				free(start,M_SEGMENT);
				return (EBUSY);
			}
		}

#endif /* LFS_TRACK_IOS */
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if(v_daddr != LFS_UNUSED_DADDR) {
				if(need_unlock) {
					VOP_UNLOCK(vp,0);
					numlocked--;
				}
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			vp = ufs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_flag & VXLOCK)) {
				ip = VTOI(vp);
				if(VOP_ISLOCKED(vp)) {
					/* printf("inode %d inlocked in bmapv\n",ip->i_number); */
					need_unlock = 0;
				} else {
					VOP_LOCK(vp,LK_EXCLUSIVE);
					need_unlock = FVG_UNLOCK;
					numlocked++;
				}
				lfs_vref(vp);
				numrefed++;
			} else {
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if(error) {
					v_daddr = LFS_UNUSED_DADDR;
					need_unlock = 0;
#ifdef DEBUG_LFS
652 printf("lfs_bmapv: vget of ino %d failed with %d)",blkp->bi_inode,error);
#endif
					continue;
				} else {
					need_unlock = FVG_PUT;
					numlocked++;
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.  Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if(blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &(blkp->bi_daddr), NULL);
			if(error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if(v_daddr != LFS_UNUSED_DADDR) {
		if(need_unlock) {
			VOP_UNLOCK(vp,0);
			numlocked--;
		}
		lfs_vunref(vp);
		numrefed--;
	}

	if(numlocked != 0 || numrefed != 0) {
		panic("lfs_bmapv: numlocked=%d numrefed=%d", numlocked,
		      numrefed);
	}

	copyout(start, SCARG(uap, blkiov), origcnt * sizeof(BLOCK_INFO));
	free(start, M_SEGMENT);

	return 0;
}

/*
 * lfs_segclean:
 *
 * Mark the segment clean.
 *
 * 0 on success
 * -1/errno is returned on error.
 */
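/*
 * Usage sketch (an illustration, not part of this file): after
 * lfs_markv has rewritten a segment's live data, the cleaner marks
 * the segment clean; EBUSY here means the segment is still the
 * current or an active one and should simply be skipped for now.
 * "segnum" is a placeholder.
 *
 *	extern int lfs_segclean(fsid_t *, u_long);
 *
 *	if (lfs_segclean(&fsid, segnum) < 0 && errno != EBUSY)
 *		err(1, "lfs_segclean");
 */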
int
lfs_segclean(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	fs = VFSTOUFS(mntp)->um_lfs;

	if (datosn(fs, fs->lfs_curseg) == SCARG(uap, segment))
		return (EBUSY);

	LFS_SEGENTRY(sup, fs, SCARG(uap, segment), bp);
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
		return (EBUSY);
	}

	fs->lfs_avail += fsbtodb(fs, fs->lfs_ssize) - 1;
	fs->lfs_bfree += (sup->su_nsums * LFS_SUMMARY_SIZE / DEV_BSIZE) +
		sup->su_ninos * btodb(fs->lfs_bsize);
	sup->su_flags &= ~SEGUSE_DIRTY;
#if 1
	/* XXX KS - before we return, really empty the segment (i.e., fill
	   it with zeroes).  This is only for debugging purposes. */
	{
		daddr_t start;
		int offset, sizeleft, bufsize;
		struct buf *zbp;

		start = sntoda(fs, SCARG(uap, segment));
		offset = (sup->su_flags & SEGUSE_SUPERBLOCK) ? LFS_SBPAD : 0;
		sizeleft = fs->lfs_ssize / DEV_BSIZE - offset;
		while(sizeleft > 0) {
			bufsize = (sizeleft < MAXPHYS) ? sizeleft : MAXPHYS;
			zbp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, start+offset, bufsize);
			memset(zbp->b_data, 'Z', bufsize);
			VOP_STRATEGY(zbp);
			offset += bufsize;
			sizeleft -= bufsize;
		}
	}
#endif
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	(void) VOP_BWRITE(bp);
	wakeup(&fs->lfs_avail);

	return (0);
}

/*
 * lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified, which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 * 0 on success
 * 1 on timeout
 * -1/errno is returned on error.
 */
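/*
 * Usage sketch (an illustration, not part of this file): the cleaner's
 * main loop sleeps here between passes.  Passing a NULL timeval waits
 * indefinitely; the five-minute value below is only an example.
 *
 *	extern int lfs_segwait(fsid_t *, struct timeval *);
 *	struct timeval tv = { 300, 0 };
 *
 *	if (lfs_segwait(&fsid, &tv) < 0)
 *		err(1, "lfs_segwait");
 */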
int
lfs_segwait(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	extern int lfs_allclean_wakeup;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
	}
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * If we find the vnode on the hash chain, then it may be locked by another
 * process; so we set (*need_unlock) to zero.
 *
 * If we don't, we call ufs_ihashins, which locks the inode, and we set
 * (*need_unlock) to non-zero.
 *
 * In either case we lfs_vref, and it is the caller's responsibility to
 * lfs_vunref and VOP_UNLOCK (if necessary) when finished.
 */
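/*
 * Caller-side sketch (an illustration, mirroring the cleanup done in
 * lfs_markv above): whoever calls lfs_fastvget must drop the reference,
 * and the lock if the returned flags indicate one was taken.
 *
 *	int flags;
 *
 *	if (lfs_fastvget(mntp, ino, daddr, &vp, NULL, &flags) == 0) {
 *		(... use vp ...)
 *		if (flags)		(FVG_UNLOCK or FVG_PUT: vp is locked)
 *			VOP_UNLOCK(vp, 0);
 *		lfs_vunref(vp);
 *	}
 */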
#ifdef USE_UFS_HASHLOCK
extern struct lock ufs_hashlock;
#endif

int
lfs_fastvget(mp, ino, daddr, vpp, dinp, need_unlock)
	struct mount *mp;
	ino_t ino;
	ufs_daddr_t daddr;
	struct vnode **vpp;
	struct dinode *dinp;
	int *need_unlock;
{
	register struct inode *ip;
	struct vnode *vp;
	struct ufsmount *ump;
	dev_t dev;
	int error;
	struct buf *bp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	*need_unlock = 0;
	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */
#ifdef USE_UFS_HASHLOCK
	do {
#endif
		if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
			lfs_vref(*vpp);
			if ((*vpp)->v_flag & VXLOCK) {
				printf("vnode VXLOCKed for ino %d\n",ino);
				clean_vnlocked++;
#ifdef LFS_EAGAIN_FAIL
#if 0 /* XXXX KS */
				lfs_vunref(*vpp);
#endif
				return EAGAIN;
#endif
			}
			ip = VTOI(*vpp);
			if (VOP_ISLOCKED(*vpp)) {
				printf("ino %d inlocked by pid %d\n",ip->i_number,
				       ip->i_lock.lk_lockholder);
				clean_inlocked++;
#ifdef LFS_EAGAIN_FAIL
				lfs_vunref(*vpp);
				return EAGAIN;
#endif /* LFS_EAGAIN_FAIL */
			} else {
				VOP_LOCK(*vpp,LK_EXCLUSIVE);
				*need_unlock |= FVG_UNLOCK;
			}
			return (0);
		}
#ifdef USE_UFS_HASHLOCK
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
#endif

	/* Allocate new vnode/inode. */
	if ((error = lfs_vcreate(mp, ino, &vp)) != 0) {
		*vpp = NULL;
#ifdef USE_UFS_HASHLOCK
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
#endif
		return (error);
	}
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ufs_ihashins(ip);
#ifdef USE_UFS_HASHLOCK
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);
#endif

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = ump->um_lfs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, &ip->i_din.ffs_din, DINODE_SIZE);
		if (error) {
			printf("lfs_fastvget: dinode copyin failed for ino %d\n", ino);
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if(ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		error = bread(ump->um_devvp, daddr,
			      (int)ump->um_lfs->lfs_bsize, NOCRED, &bp);
		if (error) {
			printf("error != 0 at %s:%d\n",__FILE__,__LINE__);
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ufs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			lfs_vunref(vp);
			brelse(bp);
			*vpp = NULL;
			return (error);
		}
		ip->i_din.ffs_din =
			*lfs_ifind(ump->um_lfs, ino, (struct dinode *)bp->b_data);
		brelse(bp);
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.  In all
	 * cases re-init ip, the underlying vnode/inode may have changed.
	 */
	error = ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
	if (error) {
		printf("ufs_vinit returned %d for ino %d\n", error, ino);
		lfs_vunref(vp);
		*vpp = NULL;
		return (error);
	}
#ifdef DEBUG_LFS
	if(vp->v_type == VNON) {
		printf("lfs_fastvget: ino %d is type VNON! (ifmt=%o, dinp=%p)\n",
		       ip->i_number, (ip->i_ffs_mode & IFMT)>>12, dinp);
		lfs_dump_dinode(&ip->i_din.ffs_din);
#ifdef DDB
		Debugger();
#endif
	}
#endif /* DEBUG_LFS */
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	*vpp = vp;
	*need_unlock |= FVG_PUT;

	return (0);
}

struct buf *
lfs_fakebuf(vp, lbn, size, uaddr)
	struct vnode *vp;
	int lbn;
	size_t size;
	caddr_t uaddr;
{
	struct buf *bp;

#ifdef DEBUG
	/* Check for duplicates too */
	if(incore(vp,lbn)) {
		printf("Fake buffer (%d/%d) is in core\n", VTOI(vp)->i_number,
		       lbn);
		if(bread(vp, lbn, size, NOCRED, &bp))
			return NULL;
	}
#endif
	bp = lfs_newbuf(vp, lbn, 0);
	bp->b_saveaddr = uaddr;
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_flags |= B_INVAL;
	return (bp);
}