/*	$NetBSD: lfs_bio.c,v 1.114 2008/05/06 18:43:45 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.114 2008/05/06 18:43:45 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * Protected by lfs_lock.
 */
int	locked_queue_count = 0;		/* Count of locked-down buffers. */
long	locked_queue_bytes = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages = 0;		/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	= 0;		/* # of pages to trip per-fs write */
int	lfs_writing = 0;		/* Set if already kicked off a writer
					   because of buffer space */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

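/*
 * Check whether n more locked-down buffers totalling "bytes" additional
 * bytes would still fit under the LFS_WAIT_BUFS / LFS_WAIT_BYTES
 * high-water marks, counting both the buffers already locked down and
 * those reserved via lfs_reservebuf().
 */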
int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

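/*
 * Reserve buffer-cache headroom ("n" buffer headers, "bytes" bytes) ahead
 * of an operation that will lock buffers down.  If the reservation does
 * not fit, kick off a flush and sleep (interruptibly, with a timeout)
 * until it does.  A negative "n" releases a previous reservation.
 */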
/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	mutex_enter(&lfs_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep for cleaner with vnode lock holding,
		 * deadlock will occur if cleaner tries to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &lfs_lock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

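/*
 * Reserve both disk space (lfs_reserveavail) and buffer space
 * (lfs_reservebuf) for an operation about to be performed on vp, and
 * optionally on vp2 as well.  A negative fsb releases a previous
 * reservation; calls made from within a dirop, or for the vnode the
 * segment writer is currently unlocking, return immediately without
 * reserving anything.  A typical caller (illustrative only; the real
 * callers live in lfs_vnops.c) would bracket its update like this:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, fsb)) != 0)
 *		return error;
 *	...operation that consumes up to fsb blocks...
 *	lfs_reserve(fs, vp, NULL, -fsb);
 */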
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	mutex_enter(&vp->v_interlock);
	lfs_vref(vp);
	if (vp2 != NULL) {
		mutex_enter(&vp2->v_interlock);
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}

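/*
 * VOP_BWRITE for LFS: hand the buffer to lfs_bwrite_ext() as a delayed
 * write.  An asynchronous write on a writable LFS is a bug (see the
 * DIAGNOSTIC check below); buffers should only reach the disk via the
 * segment writer.
 */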
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

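/*
 * Wait until fsb blocks fit on disk (as computed by lfs_fits()), syncing
 * the cleanerinfo block and waking the cleaner each time around the loop.
 * Writes done on behalf of the cleaner itself, or as part of a forced
 * checkpoint, are let through unconditionally, since they are precisely
 * what frees up space.
 */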
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

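/*
 * Mark a buffer dirty and locked down, to be written by the segment
 * writer.  BW_CLEAN indicates a write on behalf of the cleaner (the
 * inode is flagged IN_CLEANING rather than IN_MODIFIED).  On a read-only
 * or "already unmounted" filesystem the buffer is thrown away instead.
 */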
int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ;
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Initiate a segment write of a single filesystem.
 * Called and returns with the lfs_lock held; the lock is dropped around
 * the segment write itself.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, the caller must not hold the segment lock for fs
 * (see the KDASSERT below).
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

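/*
 * Estimates of the locked-buffer cost of the dirty inodes themselves:
 * INOCOUNT is the number of inode blocks they occupy, INOBYTES the
 * number of bytes of on-disk inodes they represent.
 */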
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers;
 * flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(&vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(&vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

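/*
 * Release a buffer allocated by lfs_newbuf(): detach it from its vnode,
 * free the data area unless this is a "fake" buffer (BC_INVAL) whose
 * data belongs to someone else, and free the buffer header itself.
 */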
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		brelvp(bp);
		mutex_exit(&vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

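/*
 * Derive the wait and maximum thresholds for LFS-written pages from the
 * amount of pageable memory in the system (these presumably back the
 * LFS_WAIT_PAGES and LFS_MAX_PAGES limits checked in lfs_check() above).
 */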
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}