/*	$NetBSD: lfs_bio.c,v 1.80 2005/03/08 00:18:19 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.80 2005/03/08 00:18:19 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))
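
/*
 * For instance, SET(bp->b_flags, B_DELWRI) expands to
 * bp->b_flags |= B_DELWRI, and ISSET(bp->b_flags, B_LOCKED) tests that bit.
 */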

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * Protected by lfs_subsys_lock.
 */
int	locked_queue_count = 0;		/* Count of locked-down buffers. */
long	locked_queue_bytes = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages = 0;		/* Total number of LFS-written pages */
int	lfs_fs_pagetrip = 0;		/* # of pages to trip per-fs write */
int	lfs_writing = 0;		/* Set if already kicked off a writer
					   because of buffer space */
/* Lock for the above */
struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
    int n, int bytes)
{
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	simple_lock(&lfs_subsys_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error && error != EWOULDBLOCK) {
			simple_unlock(&lfs_subsys_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	simple_unlock(&lfs_subsys_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}
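
/*
 * A minimal usage sketch (the sizes are hypothetical): reserve room for
 * one buffer up front, then release the reservation by negating the
 * arguments.  A negative n skips the fit check and simply backs the
 * counters out, which is how lfs_reserve below releases its estimate.
 *
 *	error = lfs_reservebuf(fs, vp, NULL, 1, fs->lfs_bsize);
 *	if (error)
 *		return error;
 *	...do the work that may lock down a buffer...
 *	lfs_reservebuf(fs, vp, NULL, -1, -fs->lfs_bsize);
 */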

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	slept = 0;
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep waiting for the cleaner while holding
		 * the vnode lock, a deadlock will occur if the cleaner tries
		 * to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

#ifdef DEBUG
		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      (long)(fsb + fs->lfs_ravail + fs->lfs_favail),
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
#endif
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);

		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY);	/* XXX use lockstatus */
#endif
		if (error)
			return error;
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	lfs_vref(vp);
	if (vp2 != NULL) {
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}
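
/*
 * A hedged usage sketch: a caller about to dirty metadata on a locked
 * vnode reserves an estimate first, then releases it with the negated
 * count when done.  (The size estimate here is hypothetical.)
 *
 *	fsb = btofsb(fs, NDADDR * fs->lfs_bsize);
 *	if ((error = lfs_reserve(fs, vp, NULL, fsb)) != 0)
 *		return error;
 *	...operation that may consume segment space...
 *	lfs_reserve(fs, vp, NULL, -fsb);
 */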
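
/*
 * lfs_bwrite is LFS's VOP_BWRITE implementation.  LFS defers the actual
 * write to the segment writer, so an asynchronous (bawrite-style) buffer
 * on a read-write filesystem indicates a bug; the DIAGNOSTIC check below
 * panics in that case.
 */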
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	/* Push cleaner blocks through regardless */
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock &&
	    fs->lfs_lockpid == curproc->p_pid &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		simple_unlock(&fs->lfs_interlock);
		return 0;
	}
	simple_unlock(&fs->lfs_interlock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
#ifdef DIAGNOSTIC
		if (fs->lfs_seglock && fs->lfs_lockpid == curproc->p_pid)
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
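
/*
 * A sketch of the expected calling pattern (the conversion below is
 * illustrative; callers pass whatever block count they are about to
 * consume): block until the cleaner has made room for the write.
 *
 *	fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));
 *	if ((error = lfs_availwait(fs, fsb)) != 0)
 *		return error;
 */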

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);

	/*
	 * Don't write *any* blocks if we're mounted read-only.
	 * In particular the cleaner can't write blocks either.
	 */
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return EROFS;
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}
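
/*
 * For reference, the cleaner path writes its (malloc'ed) buffers through
 * the same function with the BW_CLEAN flag, e.g.:
 *
 *	lfs_bwrite_ext(bp, BW_CLEAN);
 *
 * which marks the inode IN_CLEANING rather than IN_MODIFIED above.  (This
 * call site is illustrative; the cleaner syscalls such as lfs_markv are
 * the expected callers.)
 */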

void
lfs_flush_fs(struct lfs *fs, int flags)
{
	if (fs->lfs_ronly)
		return;

	lfs_subsys_pages -= fs->lfs_pages;	/* XXXUBC */
	if (lfs_subsys_pages < 0)		/* XXXUBC */
		lfs_subsys_pages = 0;		/* XXXUBC */
	fs->lfs_pages = 0;	/* XXXUBC need a better way to count this */

	lfs_writer_enter(fs, "fldirop");

	if (lfs_dostats)
		++lfs_stats.flush_invoked;
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	fs->lfs_favail = 0;	/* XXX */

	lfs_writer_leave(fs);
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 *
 * Called and returns with lfs_subsys_lock held.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	if (lfs_writing && flags == 0) {	/* XXX flags */
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing && (flags & SEGM_WRITERD))
		ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
		    &lfs_subsys_lock);
	lfs_writing = 1;

	simple_unlock(&lfs_subsys_lock);

	if (only_onefs) {
		if (vfs_busy(fs->lfs_ivnode->v_mount, LK_NOWAIT, &mountlist_slock))
			goto errout;
		lfs_flush_fs(fs, flags);
		vfs_unbusy(fs->lfs_ivnode->v_mount);
	} else {
		locked_fakequeue_count = 0;
		simple_lock(&mountlist_slock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				nmp = CIRCLEQ_NEXT(mp, mnt_list);
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    MFSNAMELEN) == 0)
				lfs_flush_fs(VFSTOUFS(mp)->um_lfs, flags);
			simple_lock(&mountlist_slock);
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		simple_unlock(&mountlist_slock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	simple_lock(&lfs_subsys_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}
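
/*
 * A minimal caller sketch: lfs_flush expects lfs_subsys_lock to be held
 * on entry and returns with it still held (dropping it internally around
 * the actual writes).  lfs_reservebuf above follows this pattern:
 *
 *	simple_lock(&lfs_subsys_lock);
 *	lfs_flush(fs, 0, 0);
 *	...
 *	simple_unlock(&lfs_subsys_lock);
 */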
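
/*
 * Estimate of the ifile/inode overhead implied by the dirty inodes:
 * the number of inode blocks they will occupy, and their size in bytes.
 */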
#define INOCOUNT(fs)	howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs)	((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	simple_lock(&lfs_subsys_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &lfs_subsys_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       lfs_dirvcount > LFS_MAX_DIROP) {
		simple_unlock(&lfs_subsys_lock);
		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = tsleep(&locked_queue_count, PCATCH | PUSER,
		    "buffers", hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK) {
			simple_lock(&lfs_subsys_lock);
			break;
		}
		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		simple_lock(&lfs_subsys_lock);
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	simple_unlock(&lfs_subsys_lock);
	return (error);
}
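
/*
 * Typical use (illustrative; note that the blkno argument is not consulted
 * by the checks above): write paths call lfs_check before dirtying a new
 * block, roughly as
 *
 *	if ((error = lfs_check(vp, lbn, 0)) != 0)
 *		return error;
 *	...allocate and dirty the block...
 */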

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;
	int s;

	nbytes = roundup(size, fsbtob(fs, 1));

	s = splbio();
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
#ifdef DIAGNOSTIC
	/* Check before bp is first dereferenced by memset below. */
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	memset(bp, 0, sizeof(struct buf));
	BUF_INIT(bp);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;
	bp->b_private = fs;

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	pool_put(&bufpool, bp);
	splx(s);
}
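
/*
 * lfs_newbuf and lfs_freebuf pair up: buffers created above carry B_CALL
 * and b_iodone = lfs_callback, so they are normally released from the
 * write completion path rather than via brelse.  A sketch (the segment
 * summary usage here is hypothetical):
 *
 *	bp = lfs_newbuf(fs, fs->lfs_ivnode, daddr, fs->lfs_sumsize,
 *	    LFS_NB_SUMMARY);
 *	...fill bp->b_data and issue the write; the completion callback
 *	   eventually hands the buffer back to lfs_freebuf(fs, bp)...
 */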

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
extern struct simplelock bqueue_slock;

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
		KASSERT(!(bp->b_flags & B_CALL));
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	if (size != *bytes)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	*count = n;
	*bytes = size;
	simple_unlock(&bqueue_slock);
	splx(s);
	return;
}