/*	$NetBSD: vfs_bio.c,v 1.124 2004/04/25 12:41:12 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.124 2004/04/25 12:41:12 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */


/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif
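
/*
 * Worked example (illustrative): with MAXBSIZE == 0x8000 (32k),
 * NMEMPOOLS is 6 and the pools cover 1k, 2k, 4k, 8k, 16k and 32k
 * buffers.  In general, pool i serves buffers of size
 * 1 << (i + MEMPOOL_INDEX_OFFSET), so the largest pool must equal
 * MAXBSIZE; that is what the #error check above enforces.
 */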

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static int buf_trim(void);

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(struct buf *bp)
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
	    checkfreelist(bp, &bufqueues[BQ_AGE]) ||
	    checkfreelist(bp, &bufqueues[BQ_LRU]) ||
	    checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
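	/*
	 * Illustration (sketch of the standard <sys/queue.h> layout this
	 * relies on): when bp is the last element of queue dp, the head's
	 * tqh_last points at bp's own tqe_next field, so
	 *
	 *	dp->tqh_last == &bp->b_freelist.tqe_next
	 *
	 * holds for exactly one of the three queues, identifying the
	 * freelist bp is on.
	 */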
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
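	/*
	 * Worked example (illustrative figures): on a machine with
	 * physmem == 32768 pages (128MB with 4k pages) and the default
	 * bufcache of 15, n becomes 32768 / 100 * 15 = 4905 pages,
	 * i.e. roughly 19MB after the PAGE_SHIFT below.
	 */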
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5\n", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95\n", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueues *dp;
	int smallmem;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
	bufmem_lowater = (bufmem_hiwater >> 3);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, VM_MAP_PAGEABLE, FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	smallmem = (physmem < atop(16*1024*1024));

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && smallmem)
		    ? &pool_allocator_nointr
		    : &bufmempool_allocator;
		pool_init(pp, size, 0, 0, PR_IMMEDRELEASE, name, pa);
		pool_setlowat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	int try, thresh;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE]) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache, using
	 * a granularity of 16 steps.
	 */
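	/*
	 * Example (illustrative): with bufmem at half of bufmem_hiwater,
	 * thresh below works out to 8, so a random "try" in 0..15
	 * exceeds it roughly 7 times in 16; the allocation is then
	 * granted only if free pages are also plentiful.
	 */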
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = bufmem / (bufmem_hiwater / 16);

	if ((try > thresh) && (uvmexp.free > (2 * uvmexp.freetarg))) {
		return 1;
	}

	/* Otherwise don't allocate. */
	return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;
	struct buf *bp;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if (bufmem < bufmem_lowater)
		return 0;

	TAILQ_FOREACH(bp, &bufqueues[BQ_AGE], b_freelist)
		ninvalid += bp->b_bufsize;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}
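
/*
 * The estimate above, in words: release at least everything on the
 * AGE (invalid) list; beyond that, release the smallest of
 * 2 * MAXBSIZE, one sixteenth of the memory above the low water
 * mark, and the page daemon's current shortfall expressed in bytes.
 */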

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{

	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
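
/*
 * Example (illustrative): for size == 3000, (3000 - 1) >> 10 == 2,
 * which takes two right shifts to reach zero, so buf_mempoolidx()
 * returns 2 (the 4k pool) and buf_roundsize() returns 4096.
 * An exact power of two maps to its own pool: 4096 stays 4096.
 */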

static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
		    "buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}


static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
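
/*
 * Usage sketch (illustrative, not from this file): a filesystem
 * reads a logical block, inspects the data and releases the buffer.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */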

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}
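
/*
 * Recap of the three write flavors (sketch):
 *
 *	bwrite(bp);	-- start I/O now, sleep until it completes
 *	bawrite(bp);	-- start I/O now, don't wait (released in biodone)
 *	bdwrite(bp);	-- just mark B_DELWRI; written out later
 *
 * A caller making a small update it expects to extend shortly would
 * typically modify bp->b_data and then bdwrite(bp).
 */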

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}
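
/*
 * Usage sketch (illustrative): a caller about to overwrite an entire
 * block has no need to read the old contents first, so it uses
 * getblk() rather than bread():
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memcpy(bp->b_data, src, bsize);		-- fill the whole block
 *	bwrite(bp);
 */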

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it is the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested\n");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize, desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (buf_lotsfree() && !from_bufq &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;
	int wanted;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	wanted = ISSET(bp->b_flags, B_WANTED);
	simple_unlock(&bp->b_interlock);
	if (wanted) {
		printf("buftrim: got WANTED buffer\n");
		SET(bp->b_flags, B_INVAL);
		binshash(bp, &invalhash);
		simple_unlock(&bqueue_slock);
		goto out;
	}
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}

out:
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0;

	s = splbio();
	simple_lock(&bqueue_slock);

	/* If not asked for a specific amount, make our own estimate */
	if (n == 0)
		n = buf_canrelease();

	while (size < n && bufmem > bufmem_lowater)
		size += buf_trim();

	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}
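
/*
 * Callback sketch (illustrative; "my_iodone" is a hypothetical
 * handler): a subsystem that wants notification instead of an
 * automatic brelse() arranges
 *
 *	bp->b_iodone = my_iodone;
 *	SET(bp->b_flags, B_CALL);
 *
 * before starting the I/O; biodone() then invokes my_iodone(bp)
 * with the buffer still B_BUSY, and the handler owns its disposal.
 */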

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O.
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash + 1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
				    == B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until
				 * other buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag
					& MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash + 1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
				    == B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	struct buf_sysctl bs;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, s, elem_count;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				error = copyout(&bs, dp, out_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int *)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 3);
		if (bufmem_lowater < 64 * 1024)
			/* Ensure a reasonable minimum value */
			bufmem_lowater = 64 * 1024;

	} else if (rnode->sysctl_data == &bufmem_lowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2 * 1024)) <= 0)
			break;
	}

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "buf", NULL,
		sysctl_dobuf, 0, NULL, 0,
		CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "vm", NULL,
		NULL, 0, NULL, 0,
		CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufcache", NULL,
		sysctl_bufvm_update, 0, &bufcache, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "bufmem", NULL,
		NULL, 0, &bufmem, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufmem_lowater", NULL,
		sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "bufmem_hiwater", NULL,
		sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		CTL_VM, CTL_CREATE, CTL_EOL);
}
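
/*
 * These knobs are visible from userland once the kernel is up; for
 * example (illustrative shell session):
 *
 *	# sysctl vm.bufmem
 *	# sysctl -w vm.bufcache=20
 *
 * Writing vm.bufcache recomputes the high/low water marks via
 * sysctl_bufvm_update() above and drains the cache if it is over
 * the new high water mark.
 */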

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE / PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize / PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE / PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */