/*	$NetBSD: lfs_bio.c,v 1.134 2015/08/12 18:28:01 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.134 2015/08/12 18:28:01 dholland Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * Protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
int	locked_queue_waiters = 0;	/* Number of processes waiting on lq */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	mutex_enter(&lfs_lock);
	while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		DLOG((DLOG_AVAIL, "lfs_reservebuf: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0 && locked_queue_waiters > 0) {
		DLOG((DLOG_AVAIL, "lfs_reservebuf: broadcast: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		cv_broadcast(&locked_queue_cv);
	}

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	while (!cantwait && fsb > 0 &&
	       !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %jd,"
			      " est_bfree = %jd)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      (intmax_t)lfs_sb_getbfree(fs),
			      (intmax_t)LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
		    "lfs_reserve", 0, &lfs_lock);
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

#ifdef DIAGNOSTIC
	mutex_enter(&lfs_lock);
	if (fsb > 0)
		lfs_rescount++;
	else if (fsb < 0)
		lfs_rescount--;
	if (lfs_rescount < 0)
		panic("lfs_rescount");
	mutex_exit(&lfs_lock);
#endif

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vhold(vp);
	if (vp2 != NULL) {
		vhold(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	holdrele(vp);
	if (vp2 != NULL) {
		holdrele(vp2);
	}

	return error;
}
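
/*
 * Illustrative caller sketch (not part of the original source): a positive
 * reservation is expected to be paired with a matching negative call once
 * the operation completes.  Assuming some operation on vp that may dirty
 * up to fsb frags of metadata, the pattern is roughly:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, fsb)) != 0)
 *		return error;
 *	...perform the block-allocating operation on vp...
 *	lfs_reserve(fs, vp, NULL, -fsb);	(release the reservation)
 */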

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int64_t needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
		((howmany(lfs_sb_getuinodes(fs) + 1, LFS_INOPB(fs)) +
		  lfs_sb_getsegtabsz(fs) +
		  1) << (lfs_sb_getbshift(fs) - lfs_sb_getffshift(fs)));

	if (needed >= lfs_sb_getavail(fs)) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %jd, avail = %jd\n",
		      (long)fsb, (long)lfs_sb_getuinodes(fs), (intmax_t)needed,
		      (intmax_t)lfs_sb_getavail(fs)));
#endif
		return 0;
	}
	return 1;
}
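
/*
 * Worked example of the computation above (hypothetical geometry, not
 * taken from the original source): with 8k blocks and 1k frags
 * (bshift = 13, ffshift = 10, so the shift multiplies blocks by 8),
 * an 8k summary block, a one-block segment table, 100 dirty inodes,
 * and 64 inodes per block, a request for fsb = 16 frags needs
 *
 *	needed = 16 + 8 + (howmany(101, 64) + 1 + 1) * 8
 *	       = 16 + 8 + 4 * 8 = 56 frags
 *
 * which must be strictly less than lfs_sb_getavail(fs) for the write
 * to proceed.
 */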

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_availsleep, PCATCH | PUSER,
		    "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
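
/*
 * Illustrative caller sketch (not from this file): allocation paths are
 * expected to wait for space before committing to it, then debit
 * lfs_avail once the blocks are actually claimed, roughly as in
 *
 *	if ((error = lfs_availwait(fs, fsb)) != 0)
 *		return error;
 *	lfs_sb_subavail(fs, fsb);	(as lfs_bwrite_ext does below)
 */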

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOULFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (lfs_sb_getpflags(fs) & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ; /* XXX is this right? --ks */
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = lfs_numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		lfs_sb_subavail(fs, fsb);

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	fs->lfs_pdflush = 0;
	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}
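
/*
 * Calling-convention sketch (illustrative, mirroring what lfs_flush does
 * below): lfs_flush_fs drops and retakes lfs_lock internally, so a caller
 * only brackets the call with the lock:
 *
 *	mutex_enter(&lfs_lock);
 *	lfs_flush_fs(fs, flags);
 *	mutex_exit(&lfs_lock);
 */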

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOULFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

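/*
 * Estimated pro-forma cost of writing out this filesystem's dirty inodes:
 * INOCOUNT is the number of inode blocks needed to hold them, and
 * INOBYTES their total size in bytes.
 */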
#define INOCOUNT(fs) howmany(lfs_sb_getuinodes(fs), LFS_INOPB(fs))
#define INOBYTES(fs) (lfs_sb_getuinodes(fs) * DINOSIZE(fs))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		mutex_exit(&lfs_lock);
		lfs_flush_dirops(fs);
		mutex_enter(&lfs_lock);
	} else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		   locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		   lfs_subsys_pages > LFS_MAX_PAGES ||
		   fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		   lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}
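
/*
 * Illustrative caller sketch (not from this file): write paths are
 * expected to throttle themselves here before creating more dirty
 * buffers, roughly as in
 *
 *	if ((error = lfs_check(vp, lbn, 0)) != 0)
 *		return error;
 *	...go on to dirty buffers or pages for vp...
 */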

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, lfs_fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
#ifdef DIAGNOSTIC
	/* Check before either pointer is dereferenced below. */
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}
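
/*
 * Illustrative lifetime sketch (hypothetical, not from this file):
 * a buffer obtained from lfs_newbuf is eventually released through
 * lfs_freebuf, or through write completion via lfs_callback:
 *
 *	bp = lfs_newbuf(fs, vp, daddr, size, LFS_NB_CLEAN);
 *	...fill in bp->b_data and queue the buffer for writing...
 *	lfs_freebuf(fs, bp);	(if the buffer is abandoned instead)
 *
 * LFS_NB_CLEAN here stands in for one of the lfs_malloc() accounting
 * types; compare LFS_NB_UNKNOWN in lfs_freebuf above.
 */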

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}