/*	$NetBSD: vfs_bio.c,v 1.100 2003/12/30 12:33:23 pk Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.100 2003/12/30 12:33:23 pk Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 30
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */


/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
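
/*
 * Illustrative sketch (not compiled): the canonical locking sequence
 * implied by the rule above and used by brelse() below.  Block I/O
 * interrupts first, then take bqueue_slock, then a buffer's b_interlock:
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	simple_lock(&bp->b_interlock);
 *	...
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */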

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif
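
/*
 * Worked example of the sizing above: pool i holds buffers of
 * 1 << (i + MEMPOOL_INDEX_OFFSET) bytes, so with MAXBSIZE == 0x8000
 * (hence NMEMPOOLS == 6) the pools are:
 *
 *	pool 0 -> 1k,  pool 1 -> 2k,  pool 2 -> 4k,
 *	pool 3 -> 8k,  pool 4 -> 16k, pool 5 -> 32k == MAXBSIZE
 *
 * The #if above checks exactly this: the largest pool must equal MAXBSIZE.
 */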

/* Buffer memory pools */
struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *bufpool_page_alloc(struct pool *pp, int flags)
{
	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE,
	    UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT);
}

static void bufpool_page_free(struct pool *pp, void *v)
{
	/* Free into buf_map, the map bufpool_page_alloc() allocated from. */
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}
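
/*
 * Hedged usage sketch: machine-dependent startup code might bound the
 * buffer cache's kernel virtual space before bufinit() runs.  The 8 MB
 * figure is purely illustrative, not taken from any particular port:
 *
 *	if (buf_setvalimit(8 * 1024 * 1024) != 0)
 *		panic("buf_setvalimit");
 */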

static int buf_trim(void);

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
static int checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;
	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
	    checkfreelist(bp, &bufqueues[BQ_AGE]) ||
	    checkfreelist(bp, &bufqueues[BQ_LRU]) ||
	    checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
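
/*
 * Sketch of the trick above, assuming the standard <sys/queue.h> layout:
 * for the last element of a tail queue, tqe_next is NULL and the head's
 * tqh_last points at that element's tqe_next field, i.e.
 *
 *	dp->tqh_last == &bp->b_freelist.tqe_next
 *
 * so scanning the three queue heads for that match recovers which free
 * list the buffer is on.  For any other element, TAILQ_REMOVE() never
 * reads its head argument, which is why passing dp == NULL is safe then.
 */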

u_long buf_memcalc()
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
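
/*
 * Worked example (illustrative numbers only): on a machine with
 * physmem == 32768 pages of 4 KB (128 MB) and the default bufcache of 30,
 * buf_memcalc() yields
 *
 *	n = 32768 / 100 * 30 = 9810 pages	(integer division)
 *	n <<= PAGE_SHIFT  ->  9810 * 4096 bytes, roughly 38 MB
 *
 * which becomes bufmem_hiwater in bufinit() below.
 */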

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct bqueues *dp;
	int smallmem;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache=30) */
	bufmem_lowater = (bufmem_hiwater >> 4);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, VM_MAP_PAGEABLE, FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	smallmem = (physmem < atop(16*1024*1024));

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && smallmem)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	return (bufmem < bufmem_lowater ||
	    (bufmem < bufmem_hiwater && uvmexp.free > 2*uvmexp.freetarg));
}

/*
 * Return estimate of # of buffers we think need to be
 * released to help resolve low memory conditions.
 */
static int
buf_canrelease(void)
{
	int n;

	if (bufmem < bufmem_lowater)
		return 0;

	n = uvmexp.freetarg - uvmexp.free;
	if (n < 0)
		n = 0;
	return 2*n;
}

/*
 * Buffer memory allocation helper functions
 */
static __inline__ u_long buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline__ u_long buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
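
/*
 * Worked example for buf_mempoolidx()/buf_roundsize() with
 * MEMPOOL_INDEX_OFFSET == 10:
 *
 *	size 1..1024    -> index 0 (1k pool)
 *	size 1025..2048 -> index 1 (2k pool)
 *	size 6000       -> index 3 (8k pool), buf_roundsize() == 8192
 *
 * i.e. a request is served from the smallest pool whose power-of-two
 * buffer size can hold it.
 */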

static __inline__ caddr_t buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO+1),
		    "buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}


static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
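
/*
 * Hedged usage sketch (hypothetical caller; lbn and bsize are made-up
 * names): a filesystem typically reads a metadata block, inspects it,
 * and releases it.  NOCRED marks a read not charged to any credential:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	brelse(bp);
 *
 * Note that bread() stores the buffer through bpp even on error, so the
 * caller still brelse()s it.
 */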

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size, preserve)
	struct buf *bp;
	int size;
	int preserve;
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize, desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(slpflag, slptimeo, from_bufq)
	int slpflag, slptimeo, from_bufq;
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (buf_lotsfree() && !from_bufq &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int buf_trim(void)
{
	struct buf *bp;
	long size = 0;
	int wanted;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	wanted = ISSET(bp->b_flags, B_WANTED);
	simple_unlock(&bp->b_interlock);
	if (wanted) {
		printf("buftrim: got WANTED buffer\n");
		SET(bp->b_flags, B_INVAL);
		binshash(bp, &invalhash);
		simple_unlock(&bqueue_slock);
		goto out;
	}
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}

out:
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int buf_drain(int n)
{
	int s, size = 0;

	/* If not asked for a specific amount, make our own estimate */
	if (n == 0)
		n = buf_canrelease();

	s = splbio();
	simple_lock(&bqueue_slock);
	while (n-- > 0 && bufmem > bufmem_lowater)
		size += buf_trim();
	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* on a write, wake up vnode output waiters */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until other
				 * buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	char *dp;
	u_int i, elem_size;
	size_t len, buflen, needed;
	int error, s;

	dp = oldp;
	len = buflen = oldp != NULL ? *oldlenp : 0;
	error = 0;
	needed = 0;
	elem_size = sizeof(struct buf);

	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			if (len >= elem_size) {
				error = copyout(bp, dp, elem_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			needed += elem_size;
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	if (oldp != NULL) {
		*oldlenp = (char *)dp - (char *)oldp;
		if (needed > *oldlenp)
			error = ENOMEM;
	} else {
		needed += KERN_BUFSLOP;
		*oldlenp = needed;
	}

	return (error);
}
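
/*
 * Hedged userland sketch (not part of this file): the kern.buf node
 * created below can be read with the usual two-step sysctl(3) pattern,
 * sizing first and then fetching an array of struct buf:
 *
 *	int mib[2] = { CTL_KERN, KERN_BUF };
 *	size_t len;
 *	struct buf *bufs;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	bufs = malloc(len);
 *	if (sysctl(mib, 2, bufs, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *
 * KERN_BUFSLOP above pads the size estimate so the queues may grow a
 * little between the two calls.
 */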

static int sysctlnum_bufcache, sysctlnum_bufmemhiwater, sysctlnum_bufmemlowater;

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int *)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_num == sysctlnum_bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 4);
	} else if (rnode->sysctl_num == sysctlnum_bufmemlowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_num == sysctlnum_bufmemhiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2 * 1024)) <= 0)
			break;
	}

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{
	struct sysctlnode *rnode;

	sysctl_createv(SYSCTL_PERMANENT,
	    CTLTYPE_NODE, "buf", NULL,
	    sysctl_dobuf, 0, NULL, 0,
	    CTL_KERN, KERN_BUF, CTL_EOL);

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufcache", &rnode,
	    sysctl_bufvm_update, 0, &bufcache, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufcache = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufmem_lowater", &rnode,
	    sysctl_bufvm_update, 0, &bufmem_lowater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemlowater = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufmem_hiwater", &rnode,
	    sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemhiwater = rnode->sysctl_num;
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */