/*	$NetBSD: vfs_bio.c,v 1.111 2004/01/19 11:57:42 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.111 2004/01/19 11:57:42 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 20
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */


/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif
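
/*
 * Editorial illustration: the pools cover power-of-two sizes from
 * 1 << MEMPOOL_INDEX_OFFSET up to MAXBSIZE.  For example, with
 * MAXBSIZE == 0x8000 (32k), NMEMPOOLS == 6 gives pools of
 * 1k, 2k, 4k, 8k, 16k and 32k, and the #if above checks that the
 * largest pool is exactly MAXBSIZE.
 */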

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}
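
/*
 * Editorial note: for the limit to take full effect it must be set
 * before bufinit() runs, since bufinit() sizes the buf_map submap
 * (and buf_memcalc() clamps its result) from bufmem_valimit.  A
 * hypothetical sketch of an MD startup caller:
 *
 *	if (buf_setvalimit(16 * 1024 * 1024) != 0)
 *		panic("buf_setvalimit");
 */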

static int buf_trim(void);

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(struct buf *bp)
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
	    checkfreelist(bp, &bufqueues[BQ_AGE]) ||
	    checkfreelist(bp, &bufqueues[BQ_LRU]) ||
	    checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
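
/*
 * Editorial illustration of the abstraction break above: for a
 * non-empty tailq, the head's tqh_last points at the tqe_next field
 * of the last element.  So when bp is last on some queue,
 *
 *	dp->tqh_last == &bp->b_freelist.tqe_next
 *
 * holds exactly for the queue bp is on, which is why scanning the
 * three queue heads recovers the owning queue without a per-buffer
 * back pointer.
 */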

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 * - If bufpages is specified, use that as the number
	 *   of pages.
	 *
	 * - Otherwise, use bufcache as the percentage of
	 *   physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5\n", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95\n", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
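
/*
 * Editorial worked example (assuming 4k pages): with physmem == 32768
 * pages (128MB) and the default bufcache == 20, integer division gives
 * 32768 / 100 * 20 == 6540 pages, i.e. roughly 25.5MB after the
 * PAGE_SHIFT, before any bufmem_valimit clamp is applied.
 */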

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueues *dp;
	int smallmem;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache=30) */
	bufmem_lowater = (bufmem_hiwater >> 4);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, VM_MAP_PAGEABLE, FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	smallmem = (physmem < atop(16*1024*1024));

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && smallmem)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, PR_IMMEDRELEASE, name, pa);
		pool_setlowat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	return (bufmem < bufmem_lowater ||
	    (bufmem < bufmem_hiwater && uvmexp.free > 2*uvmexp.freetarg));
}

/*
 * Return estimate of # of buffers we think need to be
 * released to help resolve low memory conditions.
 */
static int
buf_canrelease(void)
{
	int n;

	if (bufmem < bufmem_lowater)
		return 0;

	n = uvmexp.freetarg - uvmexp.free;
	if (n < 0)
		n = 0;
	return 2*n;
}

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
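
/*
 * Editorial worked example: buf_mempoolidx(3000) computes
 * (3000 - 1) >> 10 == 2, then shifts twice more to reach zero, so
 * n == 2; buf_roundsize(3000) is therefore 1 << (2 + 10) == 4096,
 * i.e. a 3000-byte request is served from the 4k pool.
 */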

static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO+1),
		    "buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}


static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}
/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
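
/*
 * Editorial usage sketch (hypothetical caller; "vp", "lbn" and "bsize"
 * are assumed to come from the filesystem).  Note the buffer must be
 * released with brelse() even when bread() fails:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...inspect or copy bp->b_data...
 *	brelse(bp);
 */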

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
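
/*
 * Editorial usage sketch for the read-modify-write pattern bdwrite()
 * is meant for (hypothetical caller; names are illustrative):
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		...modify a few bytes of bp->b_data...
 *		bdwrite(bp);	(dirty it, release it, write it later)
 *	} else
 *		brelse(bp);
 */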

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}
	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there and not
 * marked invalid, return a pointer to it; invalid buffers are never
 * returned.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}
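
/*
 * Editorial note: the buffer returned by getblk() (and by geteblk()
 * below) is marked B_BUSY and is off the free lists, so the caller
 * owns it exclusively and must eventually hand it back via brelse(),
 * bdwrite() or bwrite().
 */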

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == NULL)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested\n");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize, desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (buf_lotsfree() && !from_bufq &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;
	int wanted;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	wanted = ISSET(bp->b_flags, B_WANTED);
	simple_unlock(&bp->b_interlock);
	if (wanted) {
		printf("buftrim: got WANTED buffer\n");
		SET(bp->b_flags, B_INVAL);
		binshash(bp, &invalhash);
		simple_unlock(&bqueue_slock);
		goto out;
	}
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}

out:
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0;

	/* If not asked for a specific amount, make our own estimate */
	if (n == 0)
		n = buf_canrelease();

	s = splbio();
	simple_lock(&bqueue_slock);
	while (n-- > 0 && bufmem > bufmem_lowater)
		size += buf_trim();
	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* on a write, note output done */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) ==
				    B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until
				 * other buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag &
					MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) ==
				    B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	char *dp;
	u_int i, elem_size;
	size_t len, buflen, needed;
	int error, s;

	dp = oldp;
	len = buflen = oldp != NULL ? *oldlenp : 0;
	error = 0;
	needed = 0;
	elem_size = sizeof(struct buf);

	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			if (len >= elem_size) {
				error = copyout(bp, dp, elem_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			needed += elem_size;
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	if (oldp != NULL) {
		*oldlenp = (char *)dp - (char *)oldp;
		if (needed > *oldlenp)
			error = ENOMEM;
	} else {
		needed += KERN_BUFSLOP;
		*oldlenp = needed;
	}

	return (error);
}

static int sysctlnum_bufcache, sysctlnum_bufmemhiwater, sysctlnum_bufmemlowater;

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int *)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_num == sysctlnum_bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 4);
	} else if (rnode->sysctl_num == sysctlnum_bufmemlowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_num == sysctlnum_bufmemhiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2*1024)) <= 0)
			break;
	}

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(SYSCTL_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT,
	    CTLTYPE_NODE, "buf", NULL,
	    sysctl_dobuf, 0, NULL, 0,
	    CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{
	struct sysctlnode *rnode;

	sysctl_createv(SYSCTL_PERMANENT,
	    CTLTYPE_NODE, "vm", NULL,
	    NULL, 0, NULL, 0,
	    CTL_VM, CTL_EOL);

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufcache", &rnode,
	    sysctl_bufvm_update, 0, &bufcache, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufcache = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufmem_lowater", &rnode,
	    sysctl_bufvm_update, 0, &bufmem_lowater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemlowater = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
	    CTLTYPE_INT, "bufmem_hiwater", &rnode,
	    sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemhiwater = rnode->sysctl_num;
}
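
/*
 * Editorial usage note: once these nodes exist, the knobs can be
 * inspected and tuned from userland with sysctl(8), e.g.
 * (illustrative commands; the node numbers themselves are assigned
 * dynamically via CTL_CREATE):
 *
 *	# sysctl vm.bufcache
 *	# sysctl -w vm.bufcache=15
 *	# sysctl -w vm.bufmem_hiwater=33554432
 */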

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */