1 /*	$NetBSD: vfs_bio.c,v 1.130 2004/09/18 16:01:03 yamt Exp $	*/
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
37 */
38
39 /*-
40 * Copyright (c) 1994 Christopher G. Demetriou
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by the University of
53 * California, Berkeley and its contributors.
54 * 4. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
71 */
72
73 /*
74 * Some references:
75 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76 * Leffler, et al.: The Design and Implementation of the 4.3BSD
77 * UNIX Operating System (Addison-Wesley, 1989)
78 */
79
80 #include "opt_bufcache.h"
81 #include "opt_softdep.h"
82
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.130 2004/09/18 16:01:03 yamt Exp $");
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/proc.h>
90 #include <sys/buf.h>
91 #include <sys/vnode.h>
92 #include <sys/mount.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sysctl.h>
96 #include <sys/conf.h>
97
98 #include <uvm/uvm.h>
99
100 #include <miscfs/specfs/specdev.h>
101
102 #ifndef BUFPAGES
103 # define BUFPAGES 0
104 #endif
105
106 #ifdef BUFCACHE
107 # if (BUFCACHE < 5) || (BUFCACHE > 95)
108 # error BUFCACHE is not between 5 and 95
109 # endif
110 #else
111 # define BUFCACHE 15
112 #endif
113
114 u_int nbuf; /* XXX - for softdep_lockedbufs */
115 u_int bufpages = BUFPAGES; /* optional hardwired count */
116 u_int bufcache = BUFCACHE; /* max % of RAM to use for buffer cache */
117
118 /* Function prototypes */
119 struct bqueues;
120
121 static int buf_trim(void);
122 static void *bufpool_page_alloc(struct pool *, int);
123 static void bufpool_page_free(struct pool *, void *);
124 static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
125 struct ucred *, int);
126 static int buf_lotsfree(void);
127 static int buf_canrelease(void);
128 static __inline u_long buf_mempoolidx(u_long);
129 static __inline u_long buf_roundsize(u_long);
130 static __inline caddr_t buf_malloc(size_t);
131 static void buf_mrelease(caddr_t, size_t);
132 int count_lock_queue(void); /* XXX */
133 #ifdef DEBUG
134 static int checkfreelist(struct buf *, struct bqueues *);
135 #endif
136
137 /* Macros to clear/set/test flags. */
138 #define SET(t, f) (t) |= (f)
139 #define CLR(t, f) (t) &= ~(f)
140 #define ISSET(t, f) ((t) & (f))
141
142 /*
143 * Definitions for the buffer hash lists.
144 */
145 #define BUFHASH(dvp, lbn) \
146 (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
147 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
148 u_long bufhash;
149 #ifndef SOFTDEP
150 struct bio_ops bioops; /* I/O operation notification */
151 #endif
152
153 /*
154 * Insq/Remq for the buffer hash lists.
155 */
156 #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
157 #define bremhash(bp) LIST_REMOVE(bp, b_hash)
158
159 /*
160 * Definitions for the buffer free lists.
161 */
162 #define BQUEUES 3 /* number of free buffer queues */
163
164 #define BQ_LOCKED 0 /* super-blocks &c */
165 #define BQ_LRU 1 /* lru, useful buffers */
166 #define BQ_AGE 2 /* rubbish */
167
168 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
169 int needbuffer;
170
171 /*
172 * Buffer queue lock.
173 * Take this lock first if also taking some buffer's b_interlock.
174 */
175 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
176
177 /*
178 * Buffer pool for I/O buffers.
179 */
180 struct pool bufpool;
181
182 /* XXX - somewhat gross.. */
183 #if MAXBSIZE == 0x2000
184 #define NMEMPOOLS 4
185 #elif MAXBSIZE == 0x4000
186 #define NMEMPOOLS 5
187 #elif MAXBSIZE == 0x8000
188 #define NMEMPOOLS 6
189 #else
190 #define NMEMPOOLS 7
191 #endif
192
193 #define MEMPOOL_INDEX_OFFSET 10 /* smallest pool is 1k */
194 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
195 #error update vfs_bio buffer memory parameters
196 #endif
197
198 /* Buffer memory pools */
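/*
 * Pool i holds buffers of size 1 << (i + MEMPOOL_INDEX_OFFSET) bytes,
 * i.e. 1k, 2k, 4k, ... up to MAXBSIZE.
 */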
199 static struct pool bmempools[NMEMPOOLS];
200
201 struct vm_map *buf_map;
202
203 /*
204 * Buffer memory pool allocator.
205 */
206 static void *
207 bufpool_page_alloc(struct pool *pp, int flags)
208 {
209
210 return (void *)uvm_km_kmemalloc1(buf_map,
211 uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
212 (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
213 }
214
215 static void
216 bufpool_page_free(struct pool *pp, void *v)
217 {
218 uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
219 }
220
221 static struct pool_allocator bufmempool_allocator = {
222 bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
223 };
224
225 /* Buffer memory management variables */
226 u_long bufmem_valimit;
227 u_long bufmem_hiwater;
228 u_long bufmem_lowater;
229 u_long bufmem;
230
231 /*
232 * MD code can call this to set a hard limit on the amount
233 * of virtual memory used by the buffer cache.
234 */
235 int
236 buf_setvalimit(vsize_t sz)
237 {
238
239 /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
240 if (sz < NMEMPOOLS * MAXBSIZE)
241 return EINVAL;
242
243 bufmem_valimit = sz;
244 return 0;
245 }
246
247 /*
248 * Insq/Remq for the buffer free lists.
249 * Call with buffer queue locked.
250 */
251 #define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist)
252 #define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
253
254 #ifdef DEBUG
255 int debug_verify_freelist = 0;
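/* Return nonzero if buffer "bp" is found on free list "dp". */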
256 static int checkfreelist(struct buf *bp, struct bqueues *dp)
257 {
258 struct buf *b;
259 TAILQ_FOREACH(b, dp, b_freelist) {
260 if (b == bp)
261 return 1;
262 }
263 return 0;
264 }
265 #endif
266
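/*
 * Remove a buffer from whichever free list it is on.
 * Called with bqueue_slock held.
 */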
267 void
268 bremfree(struct buf *bp)
269 {
270 struct bqueues *dp = NULL;
271
272 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
273
274 KDASSERT(!debug_verify_freelist ||
275 checkfreelist(bp, &bufqueues[BQ_AGE]) ||
276 checkfreelist(bp, &bufqueues[BQ_LRU]) ||
277 checkfreelist(bp, &bufqueues[BQ_LOCKED]) );
278
279 /*
280 * We only calculate the head of the freelist when removing
281 * the last element of the list as that is the only time that
282 * it is needed (e.g. to reset the tail pointer).
283 *
284 * NB: This makes an assumption about how tailq's are implemented.
285 *
286 * We break the TAILQ abstraction in order to efficiently remove a
287 * buffer from its freelist without having to know exactly which
288 * freelist it is on.
289 */
290 if (TAILQ_NEXT(bp, b_freelist) == NULL) {
291 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
292 if (dp->tqh_last == &bp->b_freelist.tqe_next)
293 break;
294 if (dp == &bufqueues[BQUEUES])
295 panic("bremfree: lost tail");
296 }
297 TAILQ_REMOVE(dp, bp, b_freelist);
298 }
299
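/*
 * Return the upper bound, in bytes, of memory to use for buffers,
 * derived from "bufpages" or "bufcache" and capped by bufmem_valimit.
 */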
300 u_long
301 buf_memcalc(void)
302 {
303 u_long n;
304
305 /*
306 * Determine the upper bound of memory to use for buffers.
307 *
308 * - If bufpages is specified, use that as the number of
309 * pages.
310 *
311 * - Otherwise, use bufcache as the percentage of
312 * physical memory.
313 */
314 if (bufpages != 0) {
315 n = bufpages;
316 } else {
317 if (bufcache < 5) {
318 printf("forcing bufcache %d -> 5", bufcache);
319 bufcache = 5;
320 }
321 if (bufcache > 95) {
322 printf("forcing bufcache %d -> 95", bufcache);
323 bufcache = 95;
324 }
325 n = physmem / 100 * bufcache;
326 }
327
328 n <<= PAGE_SHIFT;
329 if (bufmem_valimit != 0 && n > bufmem_valimit)
330 n = bufmem_valimit;
331
332 return (n);
333 }
334
335 /*
336 * Initialize buffers and hash links for buffers.
337 */
338 void
339 bufinit(void)
340 {
341 struct bqueues *dp;
342 int use_std;
343 u_int i;
344
345 /*
346 * Initialize buffer cache memory parameters.
347 */
348 bufmem = 0;
349 bufmem_hiwater = buf_memcalc();
350 /* lowater is approx. 2% of memory (with bufcache=15) */
351 bufmem_lowater = (bufmem_hiwater >> 3);
352 if (bufmem_lowater < 64 * 1024)
353 /* Ensure a reasonable minimum value */
354 bufmem_lowater = 64 * 1024;
355
356 if (bufmem_valimit != 0) {
357 vaddr_t minaddr = 0, maxaddr;
358 buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
359 bufmem_valimit, VM_MAP_PAGEABLE,
360 FALSE, 0);
361 if (buf_map == NULL)
362 panic("bufinit: cannot allocate submap");
363 } else
364 buf_map = kernel_map;
365
366 /*
367 * Initialize the buffer pools.
368 */
369 pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
370
371 /* On "small" machines use small pool page sizes where possible */
372 use_std = (physmem < atop(16*1024*1024));
373
374 /*
375 * Also use them on systems that can map the pool pages using
376 * a direct-mapped segment.
377 */
378 #ifdef PMAP_MAP_POOLPAGE
379 use_std = 1;
380 #endif
381
382 for (i = 0; i < NMEMPOOLS; i++) {
383 struct pool_allocator *pa;
384 struct pool *pp = &bmempools[i];
385 u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
386 char *name = malloc(8, M_TEMP, M_WAITOK);
387 snprintf(name, 8, "buf%dk", 1 << i);
388 pa = (size <= PAGE_SIZE && use_std)
389 ? &pool_allocator_nointr
390 : &bufmempool_allocator;
391 pool_init(pp, size, 0, 0, 0, name, pa);
392 pool_setlowat(pp, 1);
393 pool_sethiwat(pp, 1);
394 }
395
396 /* Initialize the buffer queues */
397 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
398 TAILQ_INIT(dp);
399
400 /*
401 * Estimate hash table size based on the amount of memory we
402 * intend to use for the buffer cache. The average buffer
403 * size is dependent on our clients (i.e. filesystems).
404 *
405 * For now, use an empirical 3K per buffer.
406 */
407 nbuf = (bufmem_hiwater / 1024) / 3;
408 bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
409 }
410
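/*
 * Decide whether we may allocate a fresh buffer rather than recycle
 * an existing one; returns nonzero if allocation is acceptable.
 */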
411 static int
412 buf_lotsfree(void)
413 {
414 int try, thresh;
415 struct lwp *l = curlwp;
416
417 /* Always allocate if doing copy on write */
418 if (l->l_flag & L_COWINPROGRESS)
419 return 1;
420
421 /* Always allocate if less than the low water mark. */
422 if (bufmem < bufmem_lowater)
423 return 1;
424
425 /* Never allocate if greater than the high water mark. */
426 if (bufmem > bufmem_hiwater)
427 return 0;
428
429 /* If there's anything on the AGE list, it should be eaten. */
430 if (TAILQ_FIRST(&bufqueues[BQ_AGE]) != NULL)
431 return 0;
432
433 /*
434 * The probability of getting a new allocation is inversely
435 * proportional to the current size of the cache, using
436 * a granularity of 16 steps.
437 */
438 try = random() & 0x0000000fL;
439
440 /* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
441 thresh = bufmem / (bufmem_hiwater / 16);
442
443 if ((try > thresh) && (uvmexp.free > (2 * uvmexp.freetarg))) {
444 return 1;
445 }
446
447 /* Otherwise don't allocate. */
448 return 0;
449 }
450
451 /*
452 * Return an estimate of the number of bytes we think need to be
453 * released to help resolve low memory conditions.
454 *
455 * => called at splbio.
456 * => called with bqueue_slock held.
457 */
458 static int
459 buf_canrelease(void)
460 {
461 int pagedemand, ninvalid = 0;
462 struct buf *bp;
463
464 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
465
466 if (bufmem < bufmem_lowater)
467 return 0;
468
469 TAILQ_FOREACH(bp, &bufqueues[BQ_AGE], b_freelist)
470 ninvalid += bp->b_bufsize;
471
472 pagedemand = uvmexp.freetarg - uvmexp.free;
473 if (pagedemand < 0)
474 return ninvalid;
475 return MAX(ninvalid, MIN(2 * MAXBSIZE,
476 MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
477 }
478
479 /*
480 * Buffer memory allocation helper functions
481 */
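/*
 * Return the index of the smallest memory pool that can hold
 * a buffer of "size" bytes.
 */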
482 static __inline u_long
483 buf_mempoolidx(u_long size)
484 {
485 u_int n = 0;
486
487 size -= 1;
488 size >>= MEMPOOL_INDEX_OFFSET;
489 while (size) {
490 size >>= 1;
491 n += 1;
492 }
493 if (n >= NMEMPOOLS)
494 panic("buf mem pool index %d", n);
495 return n;
496 }
497
498 static __inline u_long
499 buf_roundsize(u_long size)
500 {
501 /* Round up to nearest power of 2 */
502 return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
503 }
504
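/*
 * Allocate buffer memory from the appropriate pool, draining the
 * cache or sleeping for freed buffers if none is available.
 */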
505 static __inline caddr_t
506 buf_malloc(size_t size)
507 {
508 u_int n = buf_mempoolidx(size);
509 caddr_t addr;
510 int s;
511
512 while (1) {
513 addr = pool_get(&bmempools[n], PR_NOWAIT);
514 if (addr != NULL)
515 break;
516
517 /* No memory, see if we can free some. If so, try again */
518 if (buf_drain(1) > 0)
519 continue;
520
521 /* Wait for buffers to arrive on the LRU queue */
522 s = splbio();
523 simple_lock(&bqueue_slock);
524 needbuffer = 1;
525 ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
526 "buf_malloc", 0, &bqueue_slock);
527 splx(s);
528 }
529
530 return addr;
531 }
532
533 static void
534 buf_mrelease(caddr_t addr, size_t size)
535 {
536
537 pool_put(&bmempools[buf_mempoolidx(size)], addr);
538 }
539
540 /*
541 * bread()/breadn() helper.
542 */
543 static __inline struct buf *
544 bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
545 int async)
546 {
547 struct buf *bp;
548 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
549 struct proc *p = l->l_proc;
550 struct mount *mp;
551
552 bp = getblk(vp, blkno, size, 0, 0);
553
554 #ifdef DIAGNOSTIC
555 if (bp == NULL) {
556 panic("bio_doread: no such buf");
557 }
558 #endif
559
560 /*
561 * If buffer does not have data valid, start a read.
562 * Note that if buffer is B_INVAL, getblk() won't return it.
563 * Therefore, it's valid if its I/O has completed or been delayed.
564 */
565 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
566 /* Start I/O for the buffer. */
567 SET(bp->b_flags, B_READ | async);
568 if (async)
569 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
570 else
571 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
572 VOP_STRATEGY(vp, bp);
573
574 /* Pay for the read. */
575 p->p_stats->p_ru.ru_inblock++;
576 } else if (async) {
577 brelse(bp);
578 }
579
580 if (vp->v_type == VBLK)
581 mp = vp->v_specmountpoint;
582 else
583 mp = vp->v_mount;
584
585 /*
586 * Collect statistics on synchronous and asynchronous reads.
587 * Reads from block devices are charged to their associated
588 * filesystem (if any).
589 */
590 if (mp != NULL) {
591 if (async == 0)
592 mp->mnt_stat.f_syncreads++;
593 else
594 mp->mnt_stat.f_asyncreads++;
595 }
596
597 return (bp);
598 }
599
600 /*
601 * Read a disk block.
602 * This algorithm is described in Bach (p.54).
603 */
604 int
605 bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
606 struct buf **bpp)
607 {
608 struct buf *bp;
609
610 /* Get buffer for block. */
611 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
612
613 /* Wait for the read to complete, and return result. */
614 return (biowait(bp));
615 }
616
617 /*
618 * Read-ahead multiple disk blocks. The first is sync, the rest async.
619 * Trivial modification to the breada algorithm presented in Bach (p.55).
620 */
621 int
622 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
623 int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
624 {
625 struct buf *bp;
626 int i;
627
628 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
629
630 /*
631 * For each of the read-ahead blocks, start a read, if necessary.
632 */
633 for (i = 0; i < nrablks; i++) {
634 /* If it's in the cache, just go on to next one. */
635 if (incore(vp, rablks[i]))
636 continue;
637
638 /* Get a buffer for the read-ahead block */
639 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
640 }
641
642 /* Otherwise, we had to start a read for it; wait until it's valid. */
643 return (biowait(bp));
644 }
645
646 /*
647 * Read with single-block read-ahead. Defined in Bach (p.55), but
648 * implemented as a call to breadn().
649 * XXX for compatibility with old file systems.
650 */
651 int
652 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
653 int rabsize, struct ucred *cred, struct buf **bpp)
654 {
655
656 return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
657 }
658
659 /*
660 * Block write. Described in Bach (p.56)
661 */
662 int
663 bwrite(struct buf *bp)
664 {
665 int rv, sync, wasdelayed, s;
666 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
667 struct proc *p = l->l_proc;
668 struct vnode *vp;
669 struct mount *mp;
670
671 KASSERT(ISSET(bp->b_flags, B_BUSY));
672
673 vp = bp->b_vp;
674 if (vp != NULL) {
675 if (vp->v_type == VBLK)
676 mp = vp->v_specmountpoint;
677 else
678 mp = vp->v_mount;
679 } else {
680 mp = NULL;
681 }
682
683 /*
684 * Remember buffer type, to switch on it later. If the write was
685 * synchronous, but the file system was mounted with MNT_ASYNC,
686 * convert it to a delayed write.
687 * XXX note that this relies on delayed tape writes being converted
688 * to async, not sync writes (which is safe, but ugly).
689 */
690 sync = !ISSET(bp->b_flags, B_ASYNC);
691 if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
692 bdwrite(bp);
693 return (0);
694 }
695
696 /*
697 * Collect statistics on synchronous and asynchronous writes.
698 * Writes to block devices are charged to their associated
699 * filesystem (if any).
700 */
701 if (mp != NULL) {
702 if (sync)
703 mp->mnt_stat.f_syncwrites++;
704 else
705 mp->mnt_stat.f_asyncwrites++;
706 }
707
708 s = splbio();
709 simple_lock(&bp->b_interlock);
710
711 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
712
713 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
714
715 /*
716 * Pay for the I/O operation and make sure the buf is on the correct
717 * vnode queue.
718 */
719 if (wasdelayed)
720 reassignbuf(bp, bp->b_vp);
721 else
722 p->p_stats->p_ru.ru_oublock++;
723
724 /* Initiate disk write. Make sure the appropriate party is charged. */
725 V_INCR_NUMOUTPUT(bp->b_vp);
726 simple_unlock(&bp->b_interlock);
727 splx(s);
728
729 if (sync)
730 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
731 else
732 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
733
734 VOP_STRATEGY(vp, bp);
735
736 if (sync) {
737 /* If I/O was synchronous, wait for it to complete. */
738 rv = biowait(bp);
739
740 /* Release the buffer. */
741 brelse(bp);
742
743 return (rv);
744 } else {
745 return (0);
746 }
747 }
748
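/*
 * Generic VOP_BWRITE implementation: hand the buffer to bwrite().
 */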
749 int
750 vn_bwrite(void *v)
751 {
752 struct vop_bwrite_args *ap = v;
753
754 return (bwrite(ap->a_bp));
755 }
756
757 /*
758 * Delayed write.
759 *
760 * The buffer is marked dirty, but is not queued for I/O.
761 * This routine should be used when the buffer is expected
762 * to be modified again soon, typically a small write that
763 * partially fills a buffer.
764 *
765 * NB: magnetic tapes cannot be delayed; they must be
766 * written in the order that the writes are requested.
767 *
768 * Described in Leffler, et al. (pp. 208-213).
769 */
770 void
771 bdwrite(struct buf *bp)
772 {
773 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
774 struct proc *p = l->l_proc;
775 const struct bdevsw *bdev;
776 int s;
777
778 /* If this is a tape block, write the block now. */
779 bdev = bdevsw_lookup(bp->b_dev);
780 if (bdev != NULL && bdev->d_type == D_TAPE) {
781 bawrite(bp);
782 return;
783 }
784
785 /*
786 * If the block hasn't been seen before:
787 * (1) Mark it as having been seen,
788 * (2) Charge for the write,
789 * (3) Make sure it's on its vnode's correct block list.
790 */
791 s = splbio();
792 simple_lock(&bp->b_interlock);
793
794 KASSERT(ISSET(bp->b_flags, B_BUSY));
795
796 if (!ISSET(bp->b_flags, B_DELWRI)) {
797 SET(bp->b_flags, B_DELWRI);
798 p->p_stats->p_ru.ru_oublock++;
799 reassignbuf(bp, bp->b_vp);
800 }
801
802 /* Otherwise, the "write" is done, so mark and release the buffer. */
803 CLR(bp->b_flags, B_DONE);
804 simple_unlock(&bp->b_interlock);
805 splx(s);
806
807 brelse(bp);
808 }
809
810 /*
811 * Asynchronous block write; just an asynchronous bwrite().
812 */
813 void
814 bawrite(struct buf *bp)
815 {
816 int s;
817
818 s = splbio();
819 simple_lock(&bp->b_interlock);
820
821 KASSERT(ISSET(bp->b_flags, B_BUSY));
822
823 SET(bp->b_flags, B_ASYNC);
824 simple_unlock(&bp->b_interlock);
825 splx(s);
826 VOP_BWRITE(bp);
827 }
828
829 /*
830 * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
831 * Call at splbio() and with the buffer interlock locked.
832 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
833 */
834 void
835 bdirty(struct buf *bp)
836 {
837 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
838 struct proc *p = l->l_proc;
839
840 LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
841 KASSERT(ISSET(bp->b_flags, B_BUSY));
842
843 CLR(bp->b_flags, B_AGE);
844
845 if (!ISSET(bp->b_flags, B_DELWRI)) {
846 SET(bp->b_flags, B_DELWRI);
847 p->p_stats->p_ru.ru_oublock++;
848 reassignbuf(bp, bp->b_vp);
849 }
850 }
851
852 /*
853 * Release a buffer on to the free lists.
854 * Described in Bach (p. 46).
855 */
856 void
857 brelse(struct buf *bp)
858 {
859 struct bqueues *bufq;
860 int s;
861
862 /* Block disk interrupts. */
863 s = splbio();
864 simple_lock(&bqueue_slock);
865 simple_lock(&bp->b_interlock);
866
867 KASSERT(ISSET(bp->b_flags, B_BUSY));
868 KASSERT(!ISSET(bp->b_flags, B_CALL));
869
870 /* Wake up any processes waiting for any buffer to become free. */
871 if (needbuffer) {
872 needbuffer = 0;
873 wakeup(&needbuffer);
874 }
875
876 /* Wake up any processes waiting for _this_ buffer to become free. */
877 if (ISSET(bp->b_flags, B_WANTED)) {
878 CLR(bp->b_flags, B_WANTED|B_AGE);
879 wakeup(bp);
880 }
881
882 /*
883 * Determine which queue the buffer should be on, then put it there.
884 */
885
886 /* If it's locked, don't report an error; try again later. */
887 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
888 CLR(bp->b_flags, B_ERROR);
889
890 /* If it's not cacheable, or an error, mark it invalid. */
891 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
892 SET(bp->b_flags, B_INVAL);
893
894 if (ISSET(bp->b_flags, B_VFLUSH)) {
895 /*
896 * This is a delayed write buffer that was just flushed to
897 * disk. It is still on the LRU queue. If it's become
898 * invalid, then we need to move it to a different queue;
899 * otherwise leave it in its current position.
900 */
901 CLR(bp->b_flags, B_VFLUSH);
902 if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
903 KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
904 goto already_queued;
905 } else {
906 bremfree(bp);
907 }
908 }
909
910 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
911 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
912 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
913
914 if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
915 /*
916 * If it's invalid or empty, dissociate it from its vnode
917 * and put on the head of the appropriate queue.
918 */
919 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
920 (*bioops.io_deallocate)(bp);
921 CLR(bp->b_flags, B_DONE|B_DELWRI);
922 if (bp->b_vp) {
923 reassignbuf(bp, bp->b_vp);
924 brelvp(bp);
925 }
926 if (bp->b_bufsize <= 0)
927 /* no data */
928 goto already_queued;
929 else
930 /* invalid data */
931 bufq = &bufqueues[BQ_AGE];
932 binsheadfree(bp, bufq);
933 } else {
934 /*
935 * It has valid data. Put it on the end of the appropriate
936 * queue, so that it'll stick around for as long as possible.
937 * If buf is AGE, but has dependencies, it must be put on the
938 * last bufqueue to be scanned, i.e. LRU. This protects against the
939 * livelock where BQ_AGE only has buffers with dependencies,
940 * and we thus never get to the dependent buffers in BQ_LRU.
941 */
942 if (ISSET(bp->b_flags, B_LOCKED))
943 /* locked in core */
944 bufq = &bufqueues[BQ_LOCKED];
945 else if (!ISSET(bp->b_flags, B_AGE))
946 /* valid data */
947 bufq = &bufqueues[BQ_LRU];
948 else {
949 /* stale but valid data */
950 int has_deps;
951
952 if (LIST_FIRST(&bp->b_dep) != NULL &&
953 bioops.io_countdeps)
954 has_deps = (*bioops.io_countdeps)(bp, 0);
955 else
956 has_deps = 0;
957 bufq = has_deps ? &bufqueues[BQ_LRU] :
958 &bufqueues[BQ_AGE];
959 }
960 binstailfree(bp, bufq);
961 }
962
963 already_queued:
964 /* Unlock the buffer. */
965 CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
966 SET(bp->b_flags, B_CACHE);
967
968 /* Allow disk interrupts. */
969 simple_unlock(&bp->b_interlock);
970 simple_unlock(&bqueue_slock);
971 if (bp->b_bufsize <= 0) {
972 #ifdef DEBUG
973 memset((char *)bp, 0, sizeof(*bp));
974 #endif
975 pool_put(&bufpool, bp);
976 }
977 splx(s);
978 }
979
980 /*
981 * Determine if a block is in the cache.
982 * Just look on what would be its hash chain. If it's there, return
983 * a pointer to it, unless it's marked invalid. If it's marked invalid,
984 * we normally don't return the buffer, unless the caller explicitly
985 * wants us to.
986 */
987 struct buf *
988 incore(struct vnode *vp, daddr_t blkno)
989 {
990 struct buf *bp;
991
992 /* Search hash chain */
993 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
994 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
995 !ISSET(bp->b_flags, B_INVAL))
996 return (bp);
997 }
998
999 return (NULL);
1000 }
1001
1002 /*
1003 * Get a block of requested size that is associated with
1004 * a given vnode and block offset. If it is found in the
1005 * block cache, mark it as having been found, make it busy
1006 * and return it. Otherwise, return an empty block of the
1007 * correct size. It is up to the caller to ensure that the
1008 * cached blocks are of the correct size.
1009 */
1010 struct buf *
1011 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1012 {
1013 struct buf *bp;
1014 int s, err;
1015 int preserve;
1016
1017 start:
1018 s = splbio();
1019 simple_lock(&bqueue_slock);
1020 bp = incore(vp, blkno);
1021 if (bp != NULL) {
1022 simple_lock(&bp->b_interlock);
1023 if (ISSET(bp->b_flags, B_BUSY)) {
1024 simple_unlock(&bqueue_slock);
1025 if (curproc == uvm.pagedaemon_proc) {
1026 simple_unlock(&bp->b_interlock);
1027 splx(s);
1028 return NULL;
1029 }
1030 SET(bp->b_flags, B_WANTED);
1031 err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
1032 "getblk", slptimeo, &bp->b_interlock);
1033 splx(s);
1034 if (err)
1035 return (NULL);
1036 goto start;
1037 }
1038 #ifdef DIAGNOSTIC
1039 if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
1040 bp->b_bcount < size && vp->v_type != VBLK)
1041 panic("getblk: block size invariant failed");
1042 #endif
1043 SET(bp->b_flags, B_BUSY);
1044 bremfree(bp);
1045 preserve = 1;
1046 } else {
1047 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
1048 simple_unlock(&bqueue_slock);
1049 splx(s);
1050 goto start;
1051 }
1052
1053 binshash(bp, BUFHASH(vp, blkno));
1054 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1055 bgetvp(vp, bp);
1056 preserve = 0;
1057 }
1058 simple_unlock(&bp->b_interlock);
1059 simple_unlock(&bqueue_slock);
1060 splx(s);
1061 /*
1062 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1063 * if we re-size buffers here.
1064 */
1065 if (ISSET(bp->b_flags, B_LOCKED)) {
1066 KASSERT(bp->b_bufsize >= size);
1067 } else {
1068 allocbuf(bp, size, preserve);
1069 }
1070 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1071 return (bp);
1072 }
1073
1074 /*
1075 * Get an empty, disassociated buffer of given size.
1076 */
1077 struct buf *
1078 geteblk(int size)
1079 {
1080 struct buf *bp;
1081 int s;
1082
1083 s = splbio();
1084 simple_lock(&bqueue_slock);
1085 while ((bp = getnewbuf(0, 0, 0)) == 0)
1086 ;
1087
1088 SET(bp->b_flags, B_INVAL);
1089 binshash(bp, &invalhash);
1090 simple_unlock(&bqueue_slock);
1091 simple_unlock(&bp->b_interlock);
1092 splx(s);
1093 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1094 allocbuf(bp, size, 0);
1095 return (bp);
1096 }
1097
1098 /*
1099 * Expand or contract the actual memory allocated to a buffer.
1100 *
1101 * If the buffer shrinks, data is lost, so it's up to the
1102 * caller to have written it out *first*; this routine will not
1103 * start a write. If the buffer grows, it is the caller's
1104 * responsibility to fill out the buffer's additional contents.
1105 */
1106 void
1107 allocbuf(struct buf *bp, int size, int preserve)
1108 {
1109 vsize_t oldsize, desired_size;
1110 caddr_t addr;
1111 int s, delta;
1112
1113 desired_size = buf_roundsize(size);
1114 if (desired_size > MAXBSIZE)
1115 printf("allocbuf: buffer larger than MAXBSIZE requested");
1116
1117 bp->b_bcount = size;
1118
1119 oldsize = bp->b_bufsize;
1120 if (oldsize == desired_size)
1121 return;
1122
1123 /*
1124 * If we want a buffer of a different size, re-allocate the
1125 * buffer's memory; copy old content only if needed.
1126 */
1127 addr = buf_malloc(desired_size);
1128 if (preserve)
1129 memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1130 if (bp->b_data != NULL)
1131 buf_mrelease(bp->b_data, oldsize);
1132 bp->b_data = addr;
1133 bp->b_bufsize = desired_size;
1134
1135 /*
1136 * Update overall buffer memory counter (protected by bqueue_slock)
1137 */
1138 delta = (long)desired_size - (long)oldsize;
1139
1140 s = splbio();
1141 simple_lock(&bqueue_slock);
1142 if ((bufmem += delta) > bufmem_hiwater) {
1143 /*
1144 * Need to trim overall memory usage.
1145 */
1146 while (buf_canrelease()) {
1147 if (buf_trim() == 0)
1148 break;
1149 }
1150 }
1151
1152 simple_unlock(&bqueue_slock);
1153 splx(s);
1154 }
1155
1156 /*
1157 * Find a buffer which is available for use.
1158 * Select something from a free list.
1159 * Preference is to AGE list, then LRU list.
1160 *
1161 * Called at splbio and with buffer queues locked.
1162 * Return buffer locked.
1163 */
1164 struct buf *
1165 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1166 {
1167 struct buf *bp;
1168
1169 start:
1170 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1171
1172 /*
1173 * Get a new buffer from the pool; but use NOWAIT because
1174 * we have the buffer queues locked.
1175 */
1176 if (buf_lotsfree() && !from_bufq &&
1177 (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1178 memset((char *)bp, 0, sizeof(*bp));
1179 BUF_INIT(bp);
1180 bp->b_dev = NODEV;
1181 bp->b_vnbufs.le_next = NOLIST;
1182 bp->b_flags = B_BUSY;
1183 simple_lock(&bp->b_interlock);
1184 return (bp);
1185 }
1186
1187 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
1188 (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
1189 simple_lock(&bp->b_interlock);
1190 bremfree(bp);
1191 } else {
1192 /* wait for a free buffer of any kind */
1193 needbuffer = 1;
1194 ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
1195 "getnewbuf", slptimeo, &bqueue_slock);
1196 return (NULL);
1197 }
1198
1199 #ifdef DIAGNOSTIC
1200 if (bp->b_bufsize <= 0)
1201 panic("buffer %p: on queue but empty", bp);
1202 #endif
1203
1204 if (ISSET(bp->b_flags, B_VFLUSH)) {
1205 /*
1206 * This is a delayed write buffer being flushed to disk. Make
1207 * sure it gets aged out of the queue when it's finished, and
1208 * leave it off the LRU queue.
1209 */
1210 CLR(bp->b_flags, B_VFLUSH);
1211 SET(bp->b_flags, B_AGE);
1212 simple_unlock(&bp->b_interlock);
1213 goto start;
1214 }
1215
1216 /* Buffer is no longer on free lists. */
1217 SET(bp->b_flags, B_BUSY);
1218
1219 /*
1220 * If buffer was a delayed write, start it and return NULL
1221 * (since we might sleep while starting the write).
1222 */
1223 if (ISSET(bp->b_flags, B_DELWRI)) {
1224 /*
1225 * This buffer has gone through the LRU, so make sure it gets
1226 * reused ASAP.
1227 */
1228 SET(bp->b_flags, B_AGE);
1229 simple_unlock(&bp->b_interlock);
1230 simple_unlock(&bqueue_slock);
1231 bawrite(bp);
1232 simple_lock(&bqueue_slock);
1233 return (NULL);
1234 }
1235
1236 /* disassociate us from our vnode, if we had one... */
1237 if (bp->b_vp)
1238 brelvp(bp);
1239
1240 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1241 (*bioops.io_deallocate)(bp);
1242
1243 /* clear out various other fields */
1244 bp->b_flags = B_BUSY;
1245 bp->b_dev = NODEV;
1246 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1247 bp->b_iodone = 0;
1248 bp->b_error = 0;
1249 bp->b_resid = 0;
1250 bp->b_bcount = 0;
1251
1252 bremhash(bp);
1253 return (bp);
1254 }
1255
1256 /*
1257 * Attempt to free an aged buffer off the queues.
1258 * Called at splbio and with queue lock held.
1259 * Returns the amount of buffer memory freed.
1260 */
1261 static int
1262 buf_trim(void)
1263 {
1264 struct buf *bp;
1265 long size = 0;
1266
1267 /* Instruct getnewbuf() to get buffers off the queues */
1268 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1269 return 0;
1270
1271 KASSERT(!ISSET(bp->b_flags, B_WANTED));
1272 simple_unlock(&bp->b_interlock);
1273 size = bp->b_bufsize;
1274 bufmem -= size;
1275 simple_unlock(&bqueue_slock);
1276 if (size > 0) {
1277 buf_mrelease(bp->b_data, size);
1278 bp->b_bcount = bp->b_bufsize = 0;
1279 }
1280 /* brelse() will return the buffer to the global buffer pool */
1281 brelse(bp);
1282 simple_lock(&bqueue_slock);
1283 return size;
1284 }
1285
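/*
 * Free buffer memory until roughly "n" bytes have been released
 * (using buf_canrelease() as the target if "n" is zero) or the low
 * water mark is reached; return the number of bytes freed.
 */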
1286 int
1287 buf_drain(int n)
1288 {
1289 int s, size = 0;
1290
1291 s = splbio();
1292 simple_lock(&bqueue_slock);
1293
1294 /* If not asked for a specific amount, make our own estimate */
1295 if (n == 0)
1296 n = buf_canrelease();
1297
1298 while (size < n && bufmem > bufmem_lowater)
1299 size += buf_trim();
1300
1301 simple_unlock(&bqueue_slock);
1302 splx(s);
1303 return size;
1304 }
1305
1306 /*
1307 * Wait for operations on the buffer to complete.
1308 * When they do, extract and return the I/O's error value.
1309 */
1310 int
1311 biowait(struct buf *bp)
1312 {
1313 int s, error;
1314
1315 s = splbio();
1316 simple_lock(&bp->b_interlock);
1317 while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1318 ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1319
1320 /* check for interruption of I/O (e.g. via NFS), then errors. */
1321 if (ISSET(bp->b_flags, B_EINTR)) {
1322 CLR(bp->b_flags, B_EINTR);
1323 error = EINTR;
1324 } else if (ISSET(bp->b_flags, B_ERROR))
1325 error = bp->b_error ? bp->b_error : EIO;
1326 else
1327 error = 0;
1328
1329 simple_unlock(&bp->b_interlock);
1330 splx(s);
1331 return (error);
1332 }
1333
1334 /*
1335 * Mark I/O complete on a buffer.
1336 *
1337 * If a callback has been requested, e.g. the pageout
1338 * daemon, do so. Otherwise, awaken waiting processes.
1339 *
1340 * [ Leffler, et al., says on p.247:
1341 * "This routine wakes up the blocked process, frees the buffer
1342 * for an asynchronous write, or, for a request by the pagedaemon
1343 * process, invokes a procedure specified in the buffer structure" ]
1344 *
1345 * In real life, the pagedaemon (or other system processes) wants
1346 * to do async stuff too, and doesn't want the buffer brelse()'d.
1347 * (for swap pager, that puts swap buffers on the free lists (!!!),
1348 * for the vn device, that puts malloc'd buffers on the free lists!)
1349 */
1350 void
1351 biodone(struct buf *bp)
1352 {
1353 int s = splbio();
1354
1355 simple_lock(&bp->b_interlock);
1356 if (ISSET(bp->b_flags, B_DONE))
1357 panic("biodone already");
1358 SET(bp->b_flags, B_DONE); /* note that it's done */
1359 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1360
1361 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1362 (*bioops.io_complete)(bp);
1363
1364 if (!ISSET(bp->b_flags, B_READ)) /* wake up reader */
1365 vwakeup(bp);
1366
1367 /*
1368 * If necessary, call out. Unlock the buffer before calling
1369 * iodone() as the buffer isn't valid any more when it returns.
1370 */
1371 if (ISSET(bp->b_flags, B_CALL)) {
1372 CLR(bp->b_flags, B_CALL); /* but note callout done */
1373 simple_unlock(&bp->b_interlock);
1374 (*bp->b_iodone)(bp);
1375 } else {
1376 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release */
1377 simple_unlock(&bp->b_interlock);
1378 brelse(bp);
1379 } else { /* or just wakeup the buffer */
1380 CLR(bp->b_flags, B_WANTED);
1381 wakeup(bp);
1382 simple_unlock(&bp->b_interlock);
1383 }
1384 }
1385
1386 splx(s);
1387 }
1388
1389 /*
1390 * Return a count of buffers on the "locked" queue.
1391 */
1392 int
1393 count_lock_queue(void)
1394 {
1395 struct buf *bp;
1396 int n = 0;
1397
1398 simple_lock(&bqueue_slock);
1399 TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
1400 n++;
1401 simple_unlock(&bqueue_slock);
1402 return (n);
1403 }
1404
1405 /*
1406 * Wait for all buffers to complete I/O.
1407 * Return the number of "stuck" buffers.
1408 */
1409 int
1410 buf_syncwait(void)
1411 {
1412 struct buf *bp;
1413 int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1414
1415 dcount = 10000;
1416 for (iter = 0; iter < 20;) {
1417 s = splbio();
1418 simple_lock(&bqueue_slock);
1419 nbusy = 0;
1420 for (ihash = 0; ihash < bufhash+1; ihash++) {
1421 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1422 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1423 nbusy++;
1424 /*
1425 * With soft updates, some buffers that are
1426 * written will be remarked as dirty until other
1427 * buffers are written.
1428 */
1429 if (bp->b_vp && bp->b_vp->v_mount
1430 && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1431 && (bp->b_flags & B_DELWRI)) {
1432 simple_lock(&bp->b_interlock);
1433 bremfree(bp);
1434 bp->b_flags |= B_BUSY;
1435 nbusy++;
1436 simple_unlock(&bp->b_interlock);
1437 simple_unlock(&bqueue_slock);
1438 bawrite(bp);
1439 if (dcount-- <= 0) {
1440 printf("softdep ");
1441 goto fail;
1442 }
1443 simple_lock(&bqueue_slock);
1444 }
1445 }
1446 }
1447
1448 simple_unlock(&bqueue_slock);
1449 splx(s);
1450
1451 if (nbusy == 0)
1452 break;
1453 if (nbusy_prev == 0)
1454 nbusy_prev = nbusy;
1455 printf("%d ", nbusy);
1456 tsleep(&nbusy, PRIBIO, "bflush",
1457 (iter == 0) ? 1 : hz / 25 * iter);
1458 if (nbusy >= nbusy_prev) /* we didn't flush anything */
1459 iter++;
1460 else
1461 nbusy_prev = nbusy;
1462 }
1463
1464 if (nbusy) {
1465 fail:;
1466 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1467 printf("giving up\nPrinting vnodes for busy buffers\n");
1468 for (ihash = 0; ihash < bufhash+1; ihash++) {
1469 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1470 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1471 vprint(NULL, bp->b_vp);
1472 }
1473 }
1474 #endif
1475 }
1476
1477 return nbusy;
1478 }
1479
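/*
 * Copy the exported fields of a kernel struct buf into the
 * struct buf_sysctl layout used by the kern.buf sysctl.
 */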
1480 static void
1481 sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
1482 {
1483
1484 o->b_flags = i->b_flags;
1485 o->b_error = i->b_error;
1486 o->b_prio = i->b_prio;
1487 o->b_dev = i->b_dev;
1488 o->b_bufsize = i->b_bufsize;
1489 o->b_bcount = i->b_bcount;
1490 o->b_resid = i->b_resid;
1491 o->b_addr = PTRTOUINT64(i->b_un.b_addr);
1492 o->b_blkno = i->b_blkno;
1493 o->b_rawblkno = i->b_rawblkno;
1494 o->b_iodone = PTRTOUINT64(i->b_iodone);
1495 o->b_proc = PTRTOUINT64(i->b_proc);
1496 o->b_vp = PTRTOUINT64(i->b_vp);
1497 o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1498 o->b_lblkno = i->b_lblkno;
1499 }
1500
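/*
 * sysctl helper for kern.buf: walk the buffer free queues and copy
 * out a struct buf_sysctl record for each buffer.
 */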
1501 #define KERN_BUFSLOP 20
1502 static int
1503 sysctl_dobuf(SYSCTLFN_ARGS)
1504 {
1505 struct buf *bp;
1506 struct buf_sysctl bs;
1507 char *dp;
1508 u_int i, op, arg;
1509 size_t len, needed, elem_size, out_size;
1510 int error, s, elem_count;
1511
1512 if (namelen == 1 && name[0] == CTL_QUERY)
1513 return (sysctl_query(SYSCTLFN_CALL(rnode)));
1514
1515 if (namelen != 4)
1516 return (EINVAL);
1517
1518 dp = oldp;
1519 len = (oldp != NULL) ? *oldlenp : 0;
1520 op = name[0];
1521 arg = name[1];
1522 elem_size = name[2];
1523 elem_count = name[3];
1524 out_size = MIN(sizeof(bs), elem_size);
1525
1526 /*
1527 * at the moment, these are just "placeholders" to make the
1528 * API for retrieving kern.buf data more extensible in the
1529 * future.
1530 *
1531 * XXX kern.buf currently has "netbsd32" issues. hopefully
1532 * these will be resolved at a later point.
1533 */
1534 if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1535 elem_size < 1 || elem_count < 0)
1536 return (EINVAL);
1537
1538 error = 0;
1539 needed = 0;
1540 s = splbio();
1541 simple_lock(&bqueue_slock);
1542 for (i = 0; i < BQUEUES; i++) {
1543 TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
1544 if (len >= elem_size && elem_count > 0) {
1545 sysctl_fillbuf(bp, &bs);
1546 error = copyout(&bs, dp, out_size);
1547 if (error)
1548 goto cleanup;
1549 dp += elem_size;
1550 len -= elem_size;
1551 }
1552 if (elem_count > 0) {
1553 needed += elem_size;
1554 if (elem_count != INT_MAX)
1555 elem_count--;
1556 }
1557 }
1558 }
1559 cleanup:
1560 simple_unlock(&bqueue_slock);
1561 splx(s);
1562
1563 *oldlenp = needed;
1564 if (oldp == NULL)
1565 *oldlenp += KERN_BUFSLOP * sizeof(struct buf);
1566
1567 return (error);
1568 }
1569
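/*
 * sysctl helper for vm.bufcache and the vm.bufmem_* water marks:
 * validate the new value, recompute the water marks when bufcache
 * changes, and drain the cache below the new high water mark.
 */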
1570 static int
1571 sysctl_bufvm_update(SYSCTLFN_ARGS)
1572 {
1573 int t, error;
1574 struct sysctlnode node;
1575
1576 node = *rnode;
1577 node.sysctl_data = &t;
1578 t = *(int*)rnode->sysctl_data;
1579 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1580 if (error || newp == NULL)
1581 return (error);
1582
1583 if (rnode->sysctl_data == &bufcache) {
1584 if (t < 0 || t > 100)
1585 return (EINVAL);
1586 bufcache = t;
1587 bufmem_hiwater = buf_memcalc();
1588 bufmem_lowater = (bufmem_hiwater >> 3);
1589 if (bufmem_lowater < 64 * 1024)
1590 /* Ensure a reasonable minimum value */
1591 bufmem_lowater = 64 * 1024;
1592
1593 } else if (rnode->sysctl_data == &bufmem_lowater) {
1594 bufmem_lowater = t;
1595 } else if (rnode->sysctl_data == &bufmem_hiwater) {
1596 bufmem_hiwater = t;
1597 } else
1598 return (EINVAL);
1599
1600 /* Drain until below new high water mark */
1601 while ((t = bufmem - bufmem_hiwater) >= 0) {
1602 if (buf_drain(t / (2*1024)) <= 0)
1603 break;
1604 }
1605
1606 return 0;
1607 }
1608
1609 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1610 {
1611
1612 sysctl_createv(clog, 0, NULL, NULL,
1613 CTLFLAG_PERMANENT,
1614 CTLTYPE_NODE, "kern", NULL,
1615 NULL, 0, NULL, 0,
1616 CTL_KERN, CTL_EOL);
1617 sysctl_createv(clog, 0, NULL, NULL,
1618 CTLFLAG_PERMANENT,
1619 CTLTYPE_NODE, "buf",
1620 SYSCTL_DESCR("Kernel buffer cache information"),
1621 sysctl_dobuf, 0, NULL, 0,
1622 CTL_KERN, KERN_BUF, CTL_EOL);
1623 }
1624
1625 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1626 {
1627
1628 sysctl_createv(clog, 0, NULL, NULL,
1629 CTLFLAG_PERMANENT,
1630 CTLTYPE_NODE, "vm", NULL,
1631 NULL, 0, NULL, 0,
1632 CTL_VM, CTL_EOL);
1633
1634 sysctl_createv(clog, 0, NULL, NULL,
1635 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1636 CTLTYPE_INT, "bufcache",
1637 SYSCTL_DESCR("Percentage of kernel memory to use for "
1638 "buffer cache"),
1639 sysctl_bufvm_update, 0, &bufcache, 0,
1640 CTL_VM, CTL_CREATE, CTL_EOL);
1641 sysctl_createv(clog, 0, NULL, NULL,
1642 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1643 CTLTYPE_INT, "bufmem",
1644 SYSCTL_DESCR("Amount of kernel memory used by buffer "
1645 "cache"),
1646 NULL, 0, &bufmem, 0,
1647 CTL_VM, CTL_CREATE, CTL_EOL);
1648 sysctl_createv(clog, 0, NULL, NULL,
1649 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1650 CTLTYPE_INT, "bufmem_lowater",
1651 SYSCTL_DESCR("Minimum amount of kernel memory to "
1652 "reserve for buffer cache"),
1653 sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1654 CTL_VM, CTL_CREATE, CTL_EOL);
1655 sysctl_createv(clog, 0, NULL, NULL,
1656 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1657 CTLTYPE_INT, "bufmem_hiwater",
1658 SYSCTL_DESCR("Maximum amount of kernel memory to use "
1659 "for buffer cache"),
1660 sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1661 CTL_VM, CTL_CREATE, CTL_EOL);
1662 }
1663
1664 #ifdef DEBUG
1665 /*
1666 * Print out statistics on the current allocation of the buffer pool.
1667 * Can be enabled to print out on every ``sync'' by setting "syncprt"
1668 * in vfs_syscalls.c using sysctl.
1669 */
1670 void
1671 vfs_bufstats(void)
1672 {
1673 int s, i, j, count;
1674 struct buf *bp;
1675 struct bqueues *dp;
1676 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1677 static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1678
1679 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1680 count = 0;
1681 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1682 counts[j] = 0;
1683 s = splbio();
1684 TAILQ_FOREACH(bp, dp, b_freelist) {
1685 counts[bp->b_bufsize/PAGE_SIZE]++;
1686 count++;
1687 }
1688 splx(s);
1689 printf("%s: total-%d", bname[i], count);
1690 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1691 if (counts[j] != 0)
1692 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1693 printf("\n");
1694 }
1695 }
1696 #endif /* DEBUG */
1697