/*	$NetBSD: vfs_bio.c,v 1.134 2004/10/03 08:47:48 enami Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.134 2004/10/03 08:47:48 enami Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static __inline u_long buf_mempoolidx(u_long);
static __inline u_long buf_roundsize(u_long);
static __inline caddr_t buf_malloc(size_t);
static void buf_mrelease(caddr_t, size_t);
static __inline void binsheadfree(struct buf *, struct bqueue *);
static __inline void binstailfree(struct buf *, struct bqueue *);
int count_lock_queue(void);	/* XXX */
#ifdef DEBUG
static int checkfreelist(struct buf *, struct bqueue *);
#endif

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

struct bqueue {
	TAILQ_HEAD(, buf) bq_queue;
	uint64_t bq_bytes;
} bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];
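
/*
 * Illustrative note (not compiled): with the common MAXBSIZE of 0x10000
 * the values above give NMEMPOOLS = 7 power-of-two size classes,
 *
 *	i:      0    1    2    3    4     5     6
 *	size:  1k   2k   4k   8k   16k   32k   64k	(1 << (i + 10))
 *
 * so every buffer's data area is drawn from the pool whose page size
 * equals buf_roundsize() of the requested size, computed below.
 */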

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}
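
/*
 * Usage sketch (hypothetical MD code, not part of this file): a port
 * with a cramped kernel virtual address space might cap the cache
 * before bufinit() runs, e.g. at one eighth of physical memory:
 *
 *	if (buf_setvalimit(ptoa(physmem) / 8) != 0)
 *		printf("buffer cache VA limit rejected\n");
 *
 * bufinit() then creates buf_map as a bufmem_valimit-sized submap
 * instead of allocating buffer memory from kernel_map directly.
 */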

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueue *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static __inline void
binsheadfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static __inline void
binstailfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(struct buf *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;
#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}
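
/*
 * Typical caller pattern (sketch): bremfree() assumes bqueue_slock is
 * already held, and a caller that steals a buffer off a free list also
 * takes its b_interlock and marks it busy, as getnewbuf() and
 * buf_syncwait() below do:
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	simple_lock(&bp->b_interlock);
 *	bremfree(bp);
 *	SET(bp->b_flags, B_BUSY);
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */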

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 * - If bufpages is specified, use that as the number
	 *   of pages.
	 *
	 * - Otherwise, use bufcache as the percentage of
	 *   physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
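
/*
 * Worked example (integer arithmetic as above): on a machine with
 * 128 MB of RAM and 4 KB pages, physmem is 32768, so with the default
 * bufcache of 15,
 *
 *	n = 32768 / 100 * 15 = 327 * 15 = 4905 pages
 *	n <<= PAGE_SHIFT  ->  4905 * 4096 = 20090880 bytes (~19 MB)
 *
 * which becomes bufmem_hiwater in bufinit() below.
 */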

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache=15) */
	bufmem_lowater = (bufmem_hiwater >> 3);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, VM_MAP_PAGEABLE,
		    FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && use_std)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	int try, thresh;
	struct lwp *l = curlwp;

	/* Always allocate if doing copy on write */
	if (l->l_flag & L_COWINPROGRESS)
		return 1;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache, using
	 * a granularity of 16 steps.
	 */
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = bufmem / (bufmem_hiwater / 16);

	if (try >= thresh && uvmexp.free > (2 * uvmexp.freetarg))
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}
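
/*
 * Worked example: with bufmem at half of bufmem_hiwater, thresh is
 * bufmem / (bufmem_hiwater / 16) = 8, so "try >= thresh" succeeds for
 * 8 of the 16 possible values of try, i.e. a new allocation is allowed
 * roughly half the time (provided the free-page test also passes).
 * As the cache approaches the high water mark, thresh approaches 16
 * and new allocations become correspondingly rare.
 */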

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if (bufmem < bufmem_lowater)
		return 0;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
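
/*
 * Worked example: buf_mempoolidx(3000) computes (3000 - 1) >> 10 = 2,
 * then shifts twice more to reach zero, giving n = 2; such a buffer is
 * served by the 1 << (2 + 10) = 4096-byte pool, and
 * buf_roundsize(3000) == 4096.  An exact power of two maps to its own
 * pool (hence the "size -= 1"): buf_roundsize(4096) is 4096, not 8192.
 */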

static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
		    "buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
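
/*
 * Usage sketch (hypothetical filesystem code): the caller owns the
 * busy buffer on return whether or not the read succeeded, so it must
 * brelse() on both paths:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return error;
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */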

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
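
/*
 * Usage sketch: the three write entry points differ only in when the
 * data reaches disk.  After modifying a buffer obtained from bread()
 * or getblk(), a caller picks one of
 *
 *	bwrite(bp);	/- synchronous: start I/O, biowait(), brelse()
 *	bawrite(bp);	/- asynchronous: start I/O, release on completion
 *	bdwrite(bp);	/- delayed: mark B_DELWRI, brelse() without I/O
 *
 * ("/-" stands in for a nested comment) and in every case gives up
 * ownership of bp.
 */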

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueue *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}
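
/*
 * Usage sketch (hypothetical): when a block will be completely
 * overwritten there is no need to read it first; getblk() hands back
 * the busy buffer directly and the caller fills it in:
 *
 *	bp = getblk(vp, lblkno, bsize, 0, 0);
 *	memset(bp->b_data, 0, bsize);
 *	... fill in new contents ...
 *	bwrite(bp);		(or bdwrite(bp) for a delayed write)
 */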

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (!from_bufq && buf_lotsfree() &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
#if defined(DIAGNOSTIC)
		bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curproc != uvm.pagedaemon_proc) {
			/* wait for a free buffer of any kind */
			needbuffer = 1;
			ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
			    "getnewbuf", slptimeo, &bqueue_slock);
		}
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT(!ISSET(bp->b_flags, B_WANTED));
	simple_unlock(&bp->b_interlock);
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0, sz;

	s = splbio();
	simple_lock(&bqueue_slock);

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *     "This routine wakes up the blocked process, frees the buffer
 *     for an asynchronous write, or, for a request by the pagedaemon
 *     process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * the b_iodone callback, as the buffer isn't valid any more
	 * when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {		/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}
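
/*
 * Usage sketch (hypothetical driver code): a device's interrupt
 * handler finishes a transfer by filling in the error and residual
 * state, then handing the buffer back through biodone():
 *
 *	if (hw_error) {			(hw_error is illustrative)
 *		bp->b_error = EIO;
 *		bp->b_flags |= B_ERROR;
 *	}
 *	bp->b_resid = 0;		(all bytes transferred)
 *	biodone(bp);
 *
 * For B_CALL buffers this runs b_iodone; otherwise it wakes a
 * biowait()er or brelse()s an async buffer, as above.
 */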

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) ==
				    B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until
				 * other buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag &
					MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) ==
				    B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	struct buf_sysctl bs;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, s, elem_count;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				error = copyout(&bs, dp, out_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int*)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 3);
		if (bufmem_lowater < 64 * 1024)
			/* Ensure a reasonable minimum value */
			bufmem_lowater = 64 * 1024;

	} else if (rnode->sysctl_data == &bufmem_lowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2*1024)) <= 0)
			break;
	}

	return 0;
}
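
/*
 * The nodes created below make these knobs visible from userland,
 * e.g. (illustrative shell commands, not output):
 *
 *	# sysctl vm.bufmem
 *	# sysctl -w vm.bufcache=20
 *
 * Writing vm.bufcache recomputes both water marks via buf_memcalc()
 * and drains the cache down to the new high water mark, as above.
 */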

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "buf",
		       SYSCTL_DESCR("Kernel buffer cache information"),
		       sysctl_dobuf, 0, NULL, 0,
		       CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "vm", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufcache",
		       SYSCTL_DESCR("Percentage of kernel memory to use for "
				    "buffer cache"),
		       sysctl_bufvm_update, 0, &bufcache, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "bufmem",
		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
				    "cache"),
		       NULL, 0, &bufmem, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_lowater",
		       SYSCTL_DESCR("Minimum amount of kernel memory to "
				    "reserve for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_hiwater",
		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
				    "for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueue *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */