1 /* $NetBSD: vfs_bio.c,v 1.159 2006/04/05 00:52:16 uwe Exp $ */
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
37 */
38
39 /*-
40 * Copyright (c) 1994 Christopher G. Demetriou
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by the University of
53 * California, Berkeley and its contributors.
54 * 4. Neither the name of the University nor the names of its contributors
55 * may be used to endorse or promote products derived from this software
56 * without specific prior written permission.
57 *
58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * SUCH DAMAGE.
69 *
70 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
71 */
72
73 /*
74 * Some references:
75 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76 * Leffler, et al.: The Design and Implementation of the 4.3BSD
77 * UNIX Operating System (Addison Wesley, 1989)
78 */
79
80 #include "fs_ffs.h"
81 #include "opt_bufcache.h"
82 #include "opt_softdep.h"
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.159 2006/04/05 00:52:16 uwe Exp $");
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/kernel.h>
90 #include <sys/proc.h>
91 #include <sys/buf.h>
92 #include <sys/vnode.h>
93 #include <sys/mount.h>
94 #include <sys/malloc.h>
95 #include <sys/resourcevar.h>
96 #include <sys/sysctl.h>
97 #include <sys/conf.h>
98
99 #include <uvm/uvm.h>
100
101 #include <miscfs/specfs/specdev.h>
102
103 #ifndef BUFPAGES
104 # define BUFPAGES 0
105 #endif
106
107 #ifdef BUFCACHE
108 # if (BUFCACHE < 5) || (BUFCACHE > 95)
109 # error BUFCACHE is not between 5 and 95
110 # endif
111 #else
112 # define BUFCACHE 15
113 #endif
114
115 u_int nbuf; /* XXX - for softdep_lockedbufs */
116 u_int bufpages = BUFPAGES; /* optional hardwired count */
117 u_int bufcache = BUFCACHE; /* max % of RAM to use for buffer cache */
118
119 /* Function prototypes */
120 struct bqueue;
121
122 static void buf_setwm(void);
123 static int buf_trim(void);
124 static void *bufpool_page_alloc(struct pool *, int);
125 static void bufpool_page_free(struct pool *, void *);
126 static inline struct buf *bio_doread(struct vnode *, daddr_t, int,
127 struct ucred *, int);
128 static int buf_lotsfree(void);
129 static int buf_canrelease(void);
130 static inline u_long buf_mempoolidx(u_long);
131 static inline u_long buf_roundsize(u_long);
132 static inline caddr_t buf_malloc(size_t);
133 static void buf_mrelease(caddr_t, size_t);
134 static inline void binsheadfree(struct buf *, struct bqueue *);
135 static inline void binstailfree(struct buf *, struct bqueue *);
136 int count_lock_queue(void); /* XXX */
137 #ifdef DEBUG
138 static int checkfreelist(struct buf *, struct bqueue *);
139 #endif
140
141 /*
142 * Definitions for the buffer hash lists.
143 */
144 #define BUFHASH(dvp, lbn) \
145 (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
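/*
 * The hash key combines the vnode pointer (shifted right by 8) with the
 * logical block number, masked with bufhash, the table-size-minus-one
 * mask filled in by hashinit() in bufinit().
 */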
146 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
147 u_long bufhash;
148 #if !defined(SOFTDEP) || !defined(FFS)
149 struct bio_ops bioops; /* I/O operation notification */
150 #endif
151
152 /*
153 * Insq/Remq for the buffer hash lists.
154 */
155 #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
156 #define bremhash(bp) LIST_REMOVE(bp, b_hash)
157
158 /*
159 * Definitions for the buffer free lists.
160 */
161 #define BQUEUES 3 /* number of free buffer queues */
162
163 #define BQ_LOCKED 0 /* super-blocks &c */
164 #define BQ_LRU 1 /* lru, useful buffers */
165 #define BQ_AGE 2 /* rubbish */
166
167 struct bqueue {
168 TAILQ_HEAD(, buf) bq_queue;
169 uint64_t bq_bytes;
170 } bufqueues[BQUEUES];
171 int needbuffer;
172
173 /*
174 * Buffer queue lock.
175 * Take this lock first if also taking some buffer's b_interlock.
176 */
177 struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
178
179 /*
180 * Buffer pool for I/O buffers.
181 * Access to this pool must be protected with splbio().
182 */
183 static POOL_INIT(bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
184
185
186 /* XXX - somewhat gross.. */
187 #if MAXBSIZE == 0x2000
188 #define NMEMPOOLS 5
189 #elif MAXBSIZE == 0x4000
190 #define NMEMPOOLS 6
191 #elif MAXBSIZE == 0x8000
192 #define NMEMPOOLS 7
193 #else
194 #define NMEMPOOLS 8
195 #endif
196
197 #define MEMPOOL_INDEX_OFFSET 9 /* smallest pool is 512 bytes */
198 #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
199 #error update vfs_bio buffer memory parameters
200 #endif
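/*
 * For example, with MAXBSIZE of 0x10000 (64 kB) the #else case applies:
 * NMEMPOOLS is 8 and the pools run from 512 bytes up to 64 kB in
 * power-of-two steps.  The #if check above verifies that the largest
 * pool is exactly MAXBSIZE.
 */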
201
202 /* Buffer memory pools */
203 static struct pool bmempools[NMEMPOOLS];
204
205 struct vm_map *buf_map;
206
207 /*
208 * Buffer memory pool allocator.
209 */
210 static void *
211 bufpool_page_alloc(struct pool *pp, int flags)
212 {
213
214 return (void *)uvm_km_alloc(buf_map,
215 MAXBSIZE, MAXBSIZE,
216 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
217 | UVM_KMF_WIRED);
218 }
219
220 static void
221 bufpool_page_free(struct pool *pp, void *v)
222 {
223
224 uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
225 }
226
227 static struct pool_allocator bufmempool_allocator = {
228 bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
229 };
230
231 /* Buffer memory management variables */
232 u_long bufmem_valimit;
233 u_long bufmem_hiwater;
234 u_long bufmem_lowater;
235 u_long bufmem;
236
237 /*
238 * MD code can call this to set a hard limit on the amount
239 * of virtual memory used by the buffer cache.
240 */
241 int
242 buf_setvalimit(vsize_t sz)
243 {
244
245 /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
246 if (sz < NMEMPOOLS * MAXBSIZE)
247 return EINVAL;
248
249 bufmem_valimit = sz;
250 return 0;
251 }
252
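/*
 * Compute the buffer cache memory water marks.  The high water mark is
 * whatever buf_memcalc() allows (bufpages if set, otherwise bufcache
 * percent of physical memory), subject to a 512 kB floor; the low water
 * mark is one eighth of that.
 */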
253 static void
254 buf_setwm(void)
255 {
256
257 bufmem_hiwater = buf_memcalc();
258 /* lowater is approx. 2% of memory (with bufcache = 15) */
259 #define BUFMEM_WMSHIFT 3
260 #define BUFMEM_HIWMMIN (64 * 1024 << BUFMEM_WMSHIFT)
261 if (bufmem_hiwater < BUFMEM_HIWMMIN)
262 /* Ensure a reasonable minimum value */
263 bufmem_hiwater = BUFMEM_HIWMMIN;
264 bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
265 }
266
267 #ifdef DEBUG
268 int debug_verify_freelist = 0;
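/* Return nonzero if buffer bp is present on free list dp. */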
269 static int
270 checkfreelist(struct buf *bp, struct bqueue *dp)
271 {
272 struct buf *b;
273
274 TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
275 if (b == bp)
276 return 1;
277 }
278 return 0;
279 }
280 #endif
281
282 /*
283 * Insq/Remq for the buffer free lists.
284 * Call with buffer queue locked.
285 */
286 static inline void
287 binsheadfree(struct buf *bp, struct bqueue *dp)
288 {
289
290 KASSERT(bp->b_freelistindex == -1);
291 TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
292 dp->bq_bytes += bp->b_bufsize;
293 bp->b_freelistindex = dp - bufqueues;
294 }
295
296 static inline void
297 binstailfree(struct buf *bp, struct bqueue *dp)
298 {
299
300 KASSERT(bp->b_freelistindex == -1);
301 TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
302 dp->bq_bytes += bp->b_bufsize;
303 bp->b_freelistindex = dp - bufqueues;
304 }
305
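/*
 * Remove a buffer from the free list recorded in its b_freelistindex.
 * Call at splbio with the buffer queue (bqueue_slock) locked.
 */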
306 void
307 bremfree(struct buf *bp)
308 {
309 struct bqueue *dp;
310 int bqidx = bp->b_freelistindex;
311
312 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
313
314 KASSERT(bqidx != -1);
315 dp = &bufqueues[bqidx];
316 KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
317 KASSERT(dp->bq_bytes >= bp->b_bufsize);
318 TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
319 dp->bq_bytes -= bp->b_bufsize;
320 #if defined(DIAGNOSTIC)
321 bp->b_freelistindex = -1;
322 #endif /* defined(DIAGNOSTIC) */
323 }
324
325 u_long
326 buf_memcalc(void)
327 {
328 u_long n;
329
330 /*
331 * Determine the upper bound of memory to use for buffers.
332 *
333 * - If bufpages is specified, use that as the number
334 * of pages.
335 *
336 * - Otherwise, use bufcache as the percentage of
337 * physical memory.
338 */
339 if (bufpages != 0) {
340 n = bufpages;
341 } else {
342 if (bufcache < 5) {
343 printf("forcing bufcache %d -> 5", bufcache);
344 bufcache = 5;
345 }
346 if (bufcache > 95) {
347 printf("forcing bufcache %d -> 95", bufcache);
348 bufcache = 95;
349 }
350 n = physmem / 100 * bufcache;
351 }
352
353 n <<= PAGE_SHIFT;
354 if (bufmem_valimit != 0 && n > bufmem_valimit)
355 n = bufmem_valimit;
356
357 return (n);
358 }
359
360 /*
361 * Initialize buffers and hash links for buffers.
362 */
363 void
364 bufinit(void)
365 {
366 struct bqueue *dp;
367 int use_std;
368 u_int i;
369
370 /*
371 * Initialize buffer cache memory parameters.
372 */
373 bufmem = 0;
374 buf_setwm();
375
376 if (bufmem_valimit != 0) {
377 vaddr_t minaddr = 0, maxaddr;
378 buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
379 bufmem_valimit, VM_MAP_PAGEABLE,
380 FALSE, 0);
381 if (buf_map == NULL)
382 panic("bufinit: cannot allocate submap");
383 } else
384 buf_map = kernel_map;
385
386 /* On "small" machines use small pool page sizes where possible */
387 use_std = (physmem < atop(16*1024*1024));
388
389 /*
390 * Also use them on systems that can map the pool pages using
391 * a direct-mapped segment.
392 */
393 #ifdef PMAP_MAP_POOLPAGE
394 use_std = 1;
395 #endif
396
397 for (i = 0; i < NMEMPOOLS; i++) {
398 struct pool_allocator *pa;
399 struct pool *pp = &bmempools[i];
400 u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
401 char *name = malloc(8, M_TEMP, M_WAITOK);
402 snprintf(name, 8, "buf%dk", 1 << i);
403 pa = (size <= PAGE_SIZE && use_std)
404 ? &pool_allocator_nointr
405 : &bufmempool_allocator;
406 pool_init(pp, size, 0, 0, 0, name, pa);
407 pool_setlowat(pp, 1);
408 pool_sethiwat(pp, 1);
409 }
410
411 /* Initialize the buffer queues */
412 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
413 TAILQ_INIT(&dp->bq_queue);
414 dp->bq_bytes = 0;
415 }
416
417 /*
418 * Estimate hash table size based on the amount of memory we
419 * intend to use for the buffer cache. The average buffer
420 * size is dependent on our clients (i.e. filesystems).
421 *
422 * For now, use an empirical 3K per buffer.
423 */
424 nbuf = (bufmem_hiwater / 1024) / 3;
425 bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
426 }
427
428 static int
429 buf_lotsfree(void)
430 {
431 int try, thresh;
432 struct lwp *l = curlwp;
433
434 /* Always allocate if doing copy on write */
435 if (l->l_flag & L_COWINPROGRESS)
436 return 1;
437
438 /* Always allocate if less than the low water mark. */
439 if (bufmem < bufmem_lowater)
440 return 1;
441
442 /* Never allocate if greater than the high water mark. */
443 if (bufmem > bufmem_hiwater)
444 return 0;
445
446 /* If there's anything on the AGE list, it should be eaten. */
447 if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
448 return 0;
449
450 /*
451 * The probability of getting a new allocation decreases as the
452 * cache grows from the low towards the high water mark, using
453 * a granularity of 16 steps.
454 */
455 try = random() & 0x0000000fL;
456
457 /* Divide the range by 16 rather than computing "16 * bufmem", to avoid a 32-bit overflow. */
458 thresh = (bufmem - bufmem_lowater) /
459 ((bufmem_hiwater - bufmem_lowater) / 16);
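	/*
	 * Example: with bufmem halfway between the water marks, thresh is 8,
	 * so about half of the 16 possible values of "try" permit a new
	 * allocation.
	 */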
460
461 if (try >= thresh)
462 return 1;
463
464 /* Otherwise don't allocate. */
465 return 0;
466 }
467
468 /*
469 * Return estimate of bytes we think need to be
470 * released to help resolve low memory conditions.
471 *
472 * => called at splbio.
473 * => called with bqueue_slock held.
474 */
475 static int
476 buf_canrelease(void)
477 {
478 int pagedemand, ninvalid = 0;
479
480 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
481
482 if (bufmem < bufmem_lowater)
483 return 0;
484
485 if (bufmem > bufmem_hiwater)
486 return bufmem - bufmem_hiwater;
487
488 ninvalid += bufqueues[BQ_AGE].bq_bytes;
489
490 pagedemand = uvmexp.freetarg - uvmexp.free;
491 if (pagedemand < 0)
492 return ninvalid;
493 return MAX(ninvalid, MIN(2 * MAXBSIZE,
494 MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
495 }
496
497 /*
498 * Buffer memory allocation helper functions
499 */
500 static inline u_long
501 buf_mempoolidx(u_long size)
502 {
503 u_int n = 0;
504
505 size -= 1;
506 size >>= MEMPOOL_INDEX_OFFSET;
507 while (size) {
508 size >>= 1;
509 n += 1;
510 }
511 if (n >= NMEMPOOLS)
512 panic("buf mem pool index %d", n);
513 return n;
514 }
515
516 static inline u_long
517 buf_roundsize(u_long size)
518 {
519 /* Round up to nearest power of 2 */
520 return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
521 }
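/*
 * Worked example: buf_mempoolidx(3000) is 3, the 4096-byte pool, so
 * buf_roundsize(3000) returns 4096; an exact power of two such as 4096
 * maps to its own pool.
 */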
522
523 static inline caddr_t
524 buf_malloc(size_t size)
525 {
526 u_int n = buf_mempoolidx(size);
527 caddr_t addr;
528 int s;
529
530 while (1) {
531 addr = pool_get(&bmempools[n], PR_NOWAIT);
532 if (addr != NULL)
533 break;
534
535 /* No memory, see if we can free some. If so, try again */
536 if (buf_drain(1) > 0)
537 continue;
538
539 /* Wait for buffers to arrive on the LRU queue */
540 s = splbio();
541 simple_lock(&bqueue_slock);
542 needbuffer = 1;
543 ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
544 "buf_malloc", 0, &bqueue_slock);
545 splx(s);
546 }
547
548 return addr;
549 }
550
551 static void
552 buf_mrelease(caddr_t addr, size_t size)
553 {
554
555 pool_put(&bmempools[buf_mempoolidx(size)], addr);
556 }
557
558 /*
559 * bread()/breadn() helper.
560 */
561 static inline struct buf *
562 bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
563 int async)
564 {
565 struct buf *bp;
566 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
567 struct proc *p = l->l_proc;
568 struct mount *mp;
569
570 bp = getblk(vp, blkno, size, 0, 0);
571
572 #ifdef DIAGNOSTIC
573 if (bp == NULL) {
574 panic("bio_doread: no such buf");
575 }
576 #endif
577
578 /*
579 * If buffer does not have data valid, start a read.
580 * Note that if buffer is B_INVAL, getblk() won't return it.
581 * Therefore, it's valid if its I/O has completed or been delayed.
582 */
583 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
584 /* Start I/O for the buffer. */
585 SET(bp->b_flags, B_READ | async);
586 if (async)
587 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
588 else
589 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
590 VOP_STRATEGY(vp, bp);
591
592 /* Pay for the read. */
593 p->p_stats->p_ru.ru_inblock++;
594 } else if (async) {
595 brelse(bp);
596 }
597
598 if (vp->v_type == VBLK)
599 mp = vp->v_specmountpoint;
600 else
601 mp = vp->v_mount;
602
603 /*
604 * Collect statistics on synchronous and asynchronous reads.
605 * Reads from block devices are charged to their associated
606 * filesystem (if any).
607 */
608 if (mp != NULL) {
609 if (async == 0)
610 mp->mnt_stat.f_syncreads++;
611 else
612 mp->mnt_stat.f_asyncreads++;
613 }
614
615 return (bp);
616 }
617
618 /*
619 * Read a disk block.
620 * This algorithm is described in Bach (p.54).
621 */
622 int
623 bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
624 struct buf **bpp)
625 {
626 struct buf *bp;
627
628 /* Get buffer for block. */
629 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
630
631 /* Wait for the read to complete, and return result. */
632 return (biowait(bp));
633 }
634
635 /*
636 * Read-ahead multiple disk blocks. The first is sync, the rest async.
637 * Trivial modification to the breada algorithm presented in Bach (p.55).
638 */
639 int
640 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
641 int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
642 {
643 struct buf *bp;
644 int i;
645
646 bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
647
648 /*
649 * For each of the read-ahead blocks, start a read, if necessary.
650 */
651 for (i = 0; i < nrablks; i++) {
652 /* If it's in the cache, just go on to next one. */
653 if (incore(vp, rablks[i]))
654 continue;
655
656 /* Get a buffer for the read-ahead block */
657 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
658 }
659
660 /* Wait for the synchronous read of the first block to complete. */
661 return (biowait(bp));
662 }
663
664 /*
665 * Read with single-block read-ahead. Defined in Bach (p.55), but
666 * implemented as a call to breadn().
667 * XXX for compatibility with old file systems.
668 */
669 int
670 breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
671 int rabsize, struct ucred *cred, struct buf **bpp)
672 {
673
674 return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
675 }
676
677 /*
678 * Block write. Described in Bach (p.56)
679 */
680 int
681 bwrite(struct buf *bp)
682 {
683 int rv, sync, wasdelayed, s;
684 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
685 struct proc *p = l->l_proc;
686 struct vnode *vp;
687 struct mount *mp;
688
689 KASSERT(ISSET(bp->b_flags, B_BUSY));
690
691 vp = bp->b_vp;
692 if (vp != NULL) {
693 if (vp->v_type == VBLK)
694 mp = vp->v_specmountpoint;
695 else
696 mp = vp->v_mount;
697 } else {
698 mp = NULL;
699 }
700
701 /*
702 * Remember buffer type, to switch on it later. If the write was
703 * synchronous, but the file system was mounted with MNT_ASYNC,
704 * convert it to a delayed write.
705 * XXX note that this relies on delayed tape writes being converted
706 * to async, not sync writes (which is safe, but ugly).
707 */
708 sync = !ISSET(bp->b_flags, B_ASYNC);
709 if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
710 bdwrite(bp);
711 return (0);
712 }
713
714 /*
715 * Collect statistics on synchronous and asynchronous writes.
716 * Writes to block devices are charged to their associated
717 * filesystem (if any).
718 */
719 if (mp != NULL) {
720 if (sync)
721 mp->mnt_stat.f_syncwrites++;
722 else
723 mp->mnt_stat.f_asyncwrites++;
724 }
725
726 s = splbio();
727 simple_lock(&bp->b_interlock);
728
729 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
730
731 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
732
733 /*
734 * Pay for the I/O operation and make sure the buf is on the correct
735 * vnode queue.
736 */
737 if (wasdelayed)
738 reassignbuf(bp, bp->b_vp);
739 else
740 p->p_stats->p_ru.ru_oublock++;
741
742 /* Initiate disk write. Make sure the appropriate party is charged. */
743 V_INCR_NUMOUTPUT(bp->b_vp);
744 simple_unlock(&bp->b_interlock);
745 splx(s);
746
747 if (sync)
748 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
749 else
750 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
751
752 VOP_STRATEGY(vp, bp);
753
754 if (sync) {
755 /* If I/O was synchronous, wait for it to complete. */
756 rv = biowait(bp);
757
758 /* Release the buffer. */
759 brelse(bp);
760
761 return (rv);
762 } else {
763 return (0);
764 }
765 }
766
767 int
768 vn_bwrite(void *v)
769 {
770 struct vop_bwrite_args *ap = v;
771
772 return (bwrite(ap->a_bp));
773 }
774
775 /*
776 * Delayed write.
777 *
778 * The buffer is marked dirty, but is not queued for I/O.
779 * This routine should be used when the buffer is expected
780 * to be modified again soon, typically a small write that
781 * partially fills a buffer.
782 *
783 * NB: magnetic tapes cannot be delayed; they must be
784 * written in the order that the writes are requested.
785 *
786 * Described in Leffler, et al. (pp. 208-213).
787 */
788 void
789 bdwrite(struct buf *bp)
790 {
791 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
792 struct proc *p = l->l_proc;
793 const struct bdevsw *bdev;
794 int s;
795
796 /* If this is a tape block, write the block now. */
797 bdev = bdevsw_lookup(bp->b_dev);
798 if (bdev != NULL && bdev->d_type == D_TAPE) {
799 bawrite(bp);
800 return;
801 }
802
803 /*
804 * If the block hasn't been seen before:
805 * (1) Mark it as having been seen,
806 * (2) Charge for the write,
807 * (3) Make sure it's on its vnode's correct block list.
808 */
809 s = splbio();
810 simple_lock(&bp->b_interlock);
811
812 KASSERT(ISSET(bp->b_flags, B_BUSY));
813
814 if (!ISSET(bp->b_flags, B_DELWRI)) {
815 SET(bp->b_flags, B_DELWRI);
816 p->p_stats->p_ru.ru_oublock++;
817 reassignbuf(bp, bp->b_vp);
818 }
819
820 /* Otherwise, the "write" is done, so mark and release the buffer. */
821 CLR(bp->b_flags, B_DONE);
822 simple_unlock(&bp->b_interlock);
823 splx(s);
824
825 brelse(bp);
826 }
827
828 /*
829 * Asynchronous block write; just an asynchronous bwrite().
830 */
831 void
832 bawrite(struct buf *bp)
833 {
834 int s;
835
836 s = splbio();
837 simple_lock(&bp->b_interlock);
838
839 KASSERT(ISSET(bp->b_flags, B_BUSY));
840
841 SET(bp->b_flags, B_ASYNC);
842 simple_unlock(&bp->b_interlock);
843 splx(s);
844 VOP_BWRITE(bp);
845 }
846
847 /*
848 * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
849 * Call at splbio() and with the buffer interlock locked.
850 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
851 */
852 void
853 bdirty(struct buf *bp)
854 {
855 struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
856 struct proc *p = l->l_proc;
857
858 LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
859 KASSERT(ISSET(bp->b_flags, B_BUSY));
860
861 CLR(bp->b_flags, B_AGE);
862
863 if (!ISSET(bp->b_flags, B_DELWRI)) {
864 SET(bp->b_flags, B_DELWRI);
865 p->p_stats->p_ru.ru_oublock++;
866 reassignbuf(bp, bp->b_vp);
867 }
868 }
869
870 /*
871 * Release a buffer on to the free lists.
872 * Described in Bach (p. 46).
873 */
874 void
875 brelse(struct buf *bp)
876 {
877 struct bqueue *bufq;
878 int s;
879
880 /* Block disk interrupts. */
881 s = splbio();
882 simple_lock(&bqueue_slock);
883 simple_lock(&bp->b_interlock);
884
885 KASSERT(ISSET(bp->b_flags, B_BUSY));
886 KASSERT(!ISSET(bp->b_flags, B_CALL));
887
888 /* Wake up any processes waiting for any buffer to become free. */
889 if (needbuffer) {
890 needbuffer = 0;
891 wakeup(&needbuffer);
892 }
893
894 /* Wake up any processes waiting for _this_ buffer to become free. */
895 if (ISSET(bp->b_flags, B_WANTED)) {
896 CLR(bp->b_flags, B_WANTED|B_AGE);
897 wakeup(bp);
898 }
899
900 /*
901 * Determine which queue the buffer should be on, then put it there.
902 */
903
904 /* If it's locked, don't report an error; try again later. */
905 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
906 CLR(bp->b_flags, B_ERROR);
907
908 /* If it's not cacheable, or an error, mark it invalid. */
909 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
910 SET(bp->b_flags, B_INVAL);
911
912 if (ISSET(bp->b_flags, B_VFLUSH)) {
913 /*
914 * This is a delayed write buffer that was just flushed to
915 * disk. It is still on the LRU queue. If it's become
916 * invalid, then we need to move it to a different queue;
917 * otherwise leave it in its current position.
918 */
919 CLR(bp->b_flags, B_VFLUSH);
920 if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
921 KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
922 goto already_queued;
923 } else {
924 bremfree(bp);
925 }
926 }
927
928 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
929 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
930 KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
931
932 if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
933 /*
934 * If it's invalid or empty, dissociate it from its vnode
935 * and put on the head of the appropriate queue.
936 */
937 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
938 (*bioops.io_deallocate)(bp);
939 CLR(bp->b_flags, B_DONE|B_DELWRI);
940 if (bp->b_vp) {
941 reassignbuf(bp, bp->b_vp);
942 brelvp(bp);
943 }
944 if (bp->b_bufsize <= 0)
945 /* no data */
946 goto already_queued;
947 else
948 /* invalid data */
949 bufq = &bufqueues[BQ_AGE];
950 binsheadfree(bp, bufq);
951 } else {
952 /*
953 * It has valid data. Put it on the end of the appropriate
954 * queue, so that it'll stick around for as long as possible.
955 * If buf is AGE, but has dependencies, must put it on last
956 * bufqueue to be scanned, ie LRU. This protects against the
957 * livelock where BQ_AGE only has buffers with dependencies,
958 * and we thus never get to the dependent buffers in BQ_LRU.
959 */
960 if (ISSET(bp->b_flags, B_LOCKED))
961 /* locked in core */
962 bufq = &bufqueues[BQ_LOCKED];
963 else if (!ISSET(bp->b_flags, B_AGE))
964 /* valid data */
965 bufq = &bufqueues[BQ_LRU];
966 else {
967 /* stale but valid data */
968 int has_deps;
969
970 if (LIST_FIRST(&bp->b_dep) != NULL &&
971 bioops.io_countdeps)
972 has_deps = (*bioops.io_countdeps)(bp, 0);
973 else
974 has_deps = 0;
975 bufq = has_deps ? &bufqueues[BQ_LRU] :
976 &bufqueues[BQ_AGE];
977 }
978 binstailfree(bp, bufq);
979 }
980
981 already_queued:
982 /* Unlock the buffer. */
983 CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
984 SET(bp->b_flags, B_CACHE);
985
986 /* Allow disk interrupts. */
987 simple_unlock(&bp->b_interlock);
988 simple_unlock(&bqueue_slock);
989 if (bp->b_bufsize <= 0) {
990 #ifdef DEBUG
991 memset((char *)bp, 0, sizeof(*bp));
992 #endif
993 pool_put(&bufpool, bp);
994 }
995 splx(s);
996 }
997
998 /*
999 * Determine if a block is in the cache.
1000 * Just look on what would be its hash chain. If it's there, return
1001 * a pointer to it, unless it's marked invalid.  Buffers marked
1002 * B_INVAL are treated as not cached, so this routine never
1003 * returns an invalid buffer.
1004 */
1005 struct buf *
1006 incore(struct vnode *vp, daddr_t blkno)
1007 {
1008 struct buf *bp;
1009
1010 /* Search hash chain */
1011 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1012 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1013 !ISSET(bp->b_flags, B_INVAL))
1014 return (bp);
1015 }
1016
1017 return (NULL);
1018 }
1019
1020 /*
1021 * Get a block of requested size that is associated with
1022 * a given vnode and block offset. If it is found in the
1023 * block cache, mark it as having been found, make it busy
1024 * and return it. Otherwise, return an empty block of the
1025 * correct size. It is up to the caller to ensure that the
1026 * cached blocks are of the correct size.
1027 */
1028 struct buf *
1029 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1030 {
1031 struct buf *bp;
1032 int s, err;
1033 int preserve;
1034
1035 start:
1036 s = splbio();
1037 simple_lock(&bqueue_slock);
1038 bp = incore(vp, blkno);
1039 if (bp != NULL) {
1040 simple_lock(&bp->b_interlock);
1041 if (ISSET(bp->b_flags, B_BUSY)) {
1042 simple_unlock(&bqueue_slock);
1043 if (curproc == uvm.pagedaemon_proc) {
1044 simple_unlock(&bp->b_interlock);
1045 splx(s);
1046 return NULL;
1047 }
1048 SET(bp->b_flags, B_WANTED);
1049 err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
1050 "getblk", slptimeo, &bp->b_interlock);
1051 splx(s);
1052 if (err)
1053 return (NULL);
1054 goto start;
1055 }
1056 #ifdef DIAGNOSTIC
1057 if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
1058 bp->b_bcount < size && vp->v_type != VBLK)
1059 panic("getblk: block size invariant failed");
1060 #endif
1061 SET(bp->b_flags, B_BUSY);
1062 bremfree(bp);
1063 preserve = 1;
1064 } else {
1065 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
1066 simple_unlock(&bqueue_slock);
1067 splx(s);
1068 goto start;
1069 }
1070
1071 binshash(bp, BUFHASH(vp, blkno));
1072 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1073 bgetvp(vp, bp);
1074 preserve = 0;
1075 }
1076 simple_unlock(&bp->b_interlock);
1077 simple_unlock(&bqueue_slock);
1078 splx(s);
1079 /*
1080 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1081 * if we re-size buffers here.
1082 */
1083 if (ISSET(bp->b_flags, B_LOCKED)) {
1084 KASSERT(bp->b_bufsize >= size);
1085 } else {
1086 allocbuf(bp, size, preserve);
1087 }
1088 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1089 return (bp);
1090 }
1091
1092 /*
1093 * Get an empty, disassociated buffer of given size.
1094 */
1095 struct buf *
1096 geteblk(int size)
1097 {
1098 struct buf *bp;
1099 int s;
1100
1101 s = splbio();
1102 simple_lock(&bqueue_slock);
1103 while ((bp = getnewbuf(0, 0, 0)) == 0)
1104 ;
1105
1106 SET(bp->b_flags, B_INVAL);
1107 binshash(bp, &invalhash);
1108 simple_unlock(&bqueue_slock);
1109 simple_unlock(&bp->b_interlock);
1110 splx(s);
1111 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1112 allocbuf(bp, size, 0);
1113 return (bp);
1114 }
1115
1116 /*
1117 * Expand or contract the actual memory allocated to a buffer.
1118 *
1119 * If the buffer shrinks, data is lost, so it's up to the
1120 * caller to have written it out *first*; this routine will not
1121 * start a write. If the buffer grows, it's the caller's
1122 * responsibility to fill out the buffer's additional contents.
1123 */
1124 void
1125 allocbuf(struct buf *bp, int size, int preserve)
1126 {
1127 vsize_t oldsize, desired_size;
1128 caddr_t addr;
1129 int s, delta;
1130
1131 desired_size = buf_roundsize(size);
1132 if (desired_size > MAXBSIZE)
1133 printf("allocbuf: buffer larger than MAXBSIZE requested");
1134
1135 bp->b_bcount = size;
1136
1137 oldsize = bp->b_bufsize;
1138 if (oldsize == desired_size)
1139 return;
1140
1141 /*
1142 * If we want a buffer of a different size, re-allocate the
1143 * buffer's memory; copy old content only if needed.
1144 */
1145 addr = buf_malloc(desired_size);
1146 if (preserve)
1147 memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1148 if (bp->b_data != NULL)
1149 buf_mrelease(bp->b_data, oldsize);
1150 bp->b_data = addr;
1151 bp->b_bufsize = desired_size;
1152
1153 /*
1154 * Update overall buffer memory counter (protected by bqueue_slock)
1155 */
1156 delta = (long)desired_size - (long)oldsize;
1157
1158 s = splbio();
1159 simple_lock(&bqueue_slock);
1160 if ((bufmem += delta) > bufmem_hiwater) {
1161 /*
1162 * Need to trim overall memory usage.
1163 */
1164 while (buf_canrelease()) {
1165 if (curcpu()->ci_schedstate.spc_flags &
1166 SPCF_SHOULDYIELD) {
1167 simple_unlock(&bqueue_slock);
1168 splx(s);
1169 preempt(1);
1170 s = splbio();
1171 simple_lock(&bqueue_slock);
1172 }
1173
1174 if (buf_trim() == 0)
1175 break;
1176 }
1177 }
1178
1179 simple_unlock(&bqueue_slock);
1180 splx(s);
1181 }
1182
1183 /*
1184 * Find a buffer which is available for use.
1185 * Select something from a free list.
1186 * Preference is to AGE list, then LRU list.
1187 *
1188 * Called at splbio and with buffer queues locked.
1189 * Return buffer locked.
1190 */
1191 struct buf *
1192 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1193 {
1194 struct buf *bp;
1195
1196 start:
1197 LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1198
1199 /*
1200 * Get a new buffer from the pool; but use NOWAIT because
1201 * we have the buffer queues locked.
1202 */
1203 if (!from_bufq && buf_lotsfree() &&
1204 (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1205 memset((char *)bp, 0, sizeof(*bp));
1206 BUF_INIT(bp);
1207 bp->b_dev = NODEV;
1208 bp->b_vnbufs.le_next = NOLIST;
1209 bp->b_flags = B_BUSY;
1210 simple_lock(&bp->b_interlock);
1211 #if defined(DIAGNOSTIC)
1212 bp->b_freelistindex = -1;
1213 #endif /* defined(DIAGNOSTIC) */
1214 return (bp);
1215 }
1216
1217 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
1218 (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
1219 simple_lock(&bp->b_interlock);
1220 bremfree(bp);
1221 } else {
1222 /*
1223 * XXX: !from_bufq should be removed.
1224 */
1225 if (!from_bufq || curproc != uvm.pagedaemon_proc) {
1226 /* wait for a free buffer of any kind */
1227 needbuffer = 1;
1228 ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
1229 "getnewbuf", slptimeo, &bqueue_slock);
1230 }
1231 return (NULL);
1232 }
1233
1234 #ifdef DIAGNOSTIC
1235 if (bp->b_bufsize <= 0)
1236 panic("buffer %p: on queue but empty", bp);
1237 #endif
1238
1239 if (ISSET(bp->b_flags, B_VFLUSH)) {
1240 /*
1241 * This is a delayed write buffer being flushed to disk. Make
1242 * sure it gets aged out of the queue when it's finished, and
1243 * leave it off the LRU queue.
1244 */
1245 CLR(bp->b_flags, B_VFLUSH);
1246 SET(bp->b_flags, B_AGE);
1247 simple_unlock(&bp->b_interlock);
1248 goto start;
1249 }
1250
1251 /* Buffer is no longer on free lists. */
1252 SET(bp->b_flags, B_BUSY);
1253
1254 /*
1255 * If buffer was a delayed write, start it and return NULL
1256 * (since we might sleep while starting the write).
1257 */
1258 if (ISSET(bp->b_flags, B_DELWRI)) {
1259 /*
1260 * This buffer has gone through the LRU, so make sure it gets
1261 * reused ASAP.
1262 */
1263 SET(bp->b_flags, B_AGE);
1264 simple_unlock(&bp->b_interlock);
1265 simple_unlock(&bqueue_slock);
1266 bawrite(bp);
1267 simple_lock(&bqueue_slock);
1268 return (NULL);
1269 }
1270
1271 /* disassociate us from our vnode, if we had one... */
1272 if (bp->b_vp)
1273 brelvp(bp);
1274
1275 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1276 (*bioops.io_deallocate)(bp);
1277
1278 /* clear out various other fields */
1279 bp->b_flags = B_BUSY;
1280 bp->b_dev = NODEV;
1281 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1282 bp->b_iodone = 0;
1283 bp->b_error = 0;
1284 bp->b_resid = 0;
1285 bp->b_bcount = 0;
1286
1287 bremhash(bp);
1288 return (bp);
1289 }
1290
1291 /*
1292 * Attempt to free an aged buffer off the queues.
1293 * Called at splbio and with queue lock held.
1294 * Returns the amount of buffer memory freed.
1295 */
1296 static int
1297 buf_trim(void)
1298 {
1299 struct buf *bp;
1300 long size = 0;
1301
1302 /* Instruct getnewbuf() to get buffers off the queues */
1303 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1304 return 0;
1305
1306 KASSERT(!ISSET(bp->b_flags, B_WANTED));
1307 simple_unlock(&bp->b_interlock);
1308 size = bp->b_bufsize;
1309 bufmem -= size;
1310 simple_unlock(&bqueue_slock);
1311 if (size > 0) {
1312 buf_mrelease(bp->b_data, size);
1313 bp->b_bcount = bp->b_bufsize = 0;
1314 }
1315 /* brelse() will return the buffer to the global buffer pool */
1316 brelse(bp);
1317 simple_lock(&bqueue_slock);
1318 return size;
1319 }
1320
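/*
 * Release buffer memory until roughly "n" bytes have been freed or
 * bufmem has dropped to the low water mark.  Returns the number of
 * bytes actually released.
 */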
1321 int
1322 buf_drain(int n)
1323 {
1324 int s, size = 0, sz;
1325
1326 s = splbio();
1327 simple_lock(&bqueue_slock);
1328
1329 while (size < n && bufmem > bufmem_lowater) {
1330 sz = buf_trim();
1331 if (sz <= 0)
1332 break;
1333 size += sz;
1334 }
1335
1336 simple_unlock(&bqueue_slock);
1337 splx(s);
1338 return size;
1339 }
1340
1341 /*
1342 * Wait for operations on the buffer to complete.
1343 * When they do, extract and return the I/O's error value.
1344 */
1345 int
1346 biowait(struct buf *bp)
1347 {
1348 int s, error;
1349
1350 s = splbio();
1351 simple_lock(&bp->b_interlock);
1352 while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1353 ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1354
1355 /* check errors. */
1356 if (ISSET(bp->b_flags, B_ERROR))
1357 error = bp->b_error ? bp->b_error : EIO;
1358 else
1359 error = 0;
1360
1361 simple_unlock(&bp->b_interlock);
1362 splx(s);
1363 return (error);
1364 }
1365
1366 /*
1367 * Mark I/O complete on a buffer.
1368 *
1369 * If a callback has been requested, e.g. by the pageout
1370 * daemon, invoke it. Otherwise, awaken waiting processes.
1371 *
1372 * [ Leffler, et al., says on p.247:
1373 * "This routine wakes up the blocked process, frees the buffer
1374 * for an asynchronous write, or, for a request by the pagedaemon
1375 * process, invokes a procedure specified in the buffer structure" ]
1376 *
1377 * In real life, the pagedaemon (or other system processes) wants
1378 * to do async stuff too, and doesn't want the buffer brelse()'d.
1379 * (for swap pager, that puts swap buffers on the free lists (!!!),
1380 * for the vn device, that puts malloc'd buffers on the free lists!)
1381 */
1382 void
1383 biodone(struct buf *bp)
1384 {
1385 int s = splbio();
1386
1387 simple_lock(&bp->b_interlock);
1388 if (ISSET(bp->b_flags, B_DONE))
1389 panic("biodone already");
1390 SET(bp->b_flags, B_DONE); /* note that it's done */
1391 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1392
1393 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1394 (*bioops.io_complete)(bp);
1395
1396 if (!ISSET(bp->b_flags, B_READ)) /* wake up reader */
1397 vwakeup(bp);
1398
1399 /*
1400 * If necessary, call out. Unlock the buffer before calling the
1401 * b_iodone callback, as the buffer isn't valid any more when it returns.
1402 */
1403 if (ISSET(bp->b_flags, B_CALL)) {
1404 CLR(bp->b_flags, B_CALL); /* but note callout done */
1405 simple_unlock(&bp->b_interlock);
1406 (*bp->b_iodone)(bp);
1407 } else {
1408 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release */
1409 simple_unlock(&bp->b_interlock);
1410 brelse(bp);
1411 } else { /* or just wakeup the buffer */
1412 CLR(bp->b_flags, B_WANTED);
1413 wakeup(bp);
1414 simple_unlock(&bp->b_interlock);
1415 }
1416 }
1417
1418 splx(s);
1419 }
1420
1421 /*
1422 * Return a count of buffers on the "locked" queue.
1423 */
1424 int
1425 count_lock_queue(void)
1426 {
1427 struct buf *bp;
1428 int n = 0;
1429
1430 simple_lock(&bqueue_slock);
1431 TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
1432 n++;
1433 simple_unlock(&bqueue_slock);
1434 return (n);
1435 }
1436
1437 /*
1438 * Wait for all buffers to complete I/O
1439 * Return the number of "stuck" buffers.
1440 */
1441 int
1442 buf_syncwait(void)
1443 {
1444 struct buf *bp;
1445 int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1446
1447 dcount = 10000;
1448 for (iter = 0; iter < 20;) {
1449 s = splbio();
1450 simple_lock(&bqueue_slock);
1451 nbusy = 0;
1452 for (ihash = 0; ihash < bufhash+1; ihash++) {
1453 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1454 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1455 nbusy++;
1456 /*
1457 * With soft updates, some buffers that are
1458 * written will be remarked as dirty until other
1459 * buffers are written.
1460 */
1461 if (bp->b_vp && bp->b_vp->v_mount
1462 && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1463 && (bp->b_flags & B_DELWRI)) {
1464 simple_lock(&bp->b_interlock);
1465 bremfree(bp);
1466 bp->b_flags |= B_BUSY;
1467 nbusy++;
1468 simple_unlock(&bp->b_interlock);
1469 simple_unlock(&bqueue_slock);
1470 bawrite(bp);
1471 if (dcount-- <= 0) {
1472 printf("softdep ");
1473 splx(s);
1474 goto fail;
1475 }
1476 simple_lock(&bqueue_slock);
1477 }
1478 }
1479 }
1480
1481 simple_unlock(&bqueue_slock);
1482 splx(s);
1483
1484 if (nbusy == 0)
1485 break;
1486 if (nbusy_prev == 0)
1487 nbusy_prev = nbusy;
1488 printf("%d ", nbusy);
1489 tsleep(&nbusy, PRIBIO, "bflush",
1490 (iter == 0) ? 1 : hz / 25 * iter);
1491 if (nbusy >= nbusy_prev) /* we didn't flush anything */
1492 iter++;
1493 else
1494 nbusy_prev = nbusy;
1495 }
1496
1497 if (nbusy) {
1498 fail:;
1499 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1500 printf("giving up\nPrinting vnodes for busy buffers\n");
1501 s = splbio();
1502 for (ihash = 0; ihash < bufhash+1; ihash++) {
1503 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1504 if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1505 vprint(NULL, bp->b_vp);
1506 }
1507 }
1508 splx(s);
1509 #endif
1510 }
1511
1512 return nbusy;
1513 }
1514
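/*
 * Copy the fields of an in-kernel struct buf into the exported
 * struct buf_sysctl form; kernel pointers are widened to 64-bit
 * integers with PTRTOUINT64.
 */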
1515 static void
1516 sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
1517 {
1518
1519 o->b_flags = i->b_flags;
1520 o->b_error = i->b_error;
1521 o->b_prio = i->b_prio;
1522 o->b_dev = i->b_dev;
1523 o->b_bufsize = i->b_bufsize;
1524 o->b_bcount = i->b_bcount;
1525 o->b_resid = i->b_resid;
1526 o->b_addr = PTRTOUINT64(i->b_un.b_addr);
1527 o->b_blkno = i->b_blkno;
1528 o->b_rawblkno = i->b_rawblkno;
1529 o->b_iodone = PTRTOUINT64(i->b_iodone);
1530 o->b_proc = PTRTOUINT64(i->b_proc);
1531 o->b_vp = PTRTOUINT64(i->b_vp);
1532 o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1533 o->b_lblkno = i->b_lblkno;
1534 }
1535
1536 #define KERN_BUFSLOP 20
1537 static int
1538 sysctl_dobuf(SYSCTLFN_ARGS)
1539 {
1540 struct buf *bp;
1541 struct buf_sysctl bs;
1542 char *dp;
1543 u_int i, op, arg;
1544 size_t len, needed, elem_size, out_size;
1545 int error, s, elem_count;
1546
1547 if (namelen == 1 && name[0] == CTL_QUERY)
1548 return (sysctl_query(SYSCTLFN_CALL(rnode)));
1549
1550 if (namelen != 4)
1551 return (EINVAL);
1552
1553 dp = oldp;
1554 len = (oldp != NULL) ? *oldlenp : 0;
1555 op = name[0];
1556 arg = name[1];
1557 elem_size = name[2];
1558 elem_count = name[3];
1559 out_size = MIN(sizeof(bs), elem_size);
1560
1561 /*
1562 * at the moment, these are just "placeholders" to make the
1563 * API for retrieving kern.buf data more extensible in the
1564 * future.
1565 *
1566 * XXX kern.buf currently has "netbsd32" issues. hopefully
1567 * these will be resolved at a later point.
1568 */
1569 if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1570 elem_size < 1 || elem_count < 0)
1571 return (EINVAL);
1572
1573 error = 0;
1574 needed = 0;
1575 s = splbio();
1576 simple_lock(&bqueue_slock);
1577 for (i = 0; i < BQUEUES; i++) {
1578 TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
1579 if (len >= elem_size && elem_count > 0) {
1580 sysctl_fillbuf(bp, &bs);
1581 error = copyout(&bs, dp, out_size);
1582 if (error)
1583 goto cleanup;
1584 dp += elem_size;
1585 len -= elem_size;
1586 }
1587 if (elem_count > 0) {
1588 needed += elem_size;
1589 if (elem_count != INT_MAX)
1590 elem_count--;
1591 }
1592 }
1593 }
1594 cleanup:
1595 simple_unlock(&bqueue_slock);
1596 splx(s);
1597
1598 *oldlenp = needed;
1599 if (oldp == NULL)
1600 *oldlenp += KERN_BUFSLOP * sizeof(struct buf);
1601
1602 return (error);
1603 }
1604
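/*
 * sysctl handler for vm.bufcache, vm.bufmem_lowater and vm.bufmem_hiwater:
 * validate the new value, install it (recomputing the water marks when
 * vm.bufcache changes), then drain the cache while it still exceeds the
 * high water mark.
 */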
1605 static int
1606 sysctl_bufvm_update(SYSCTLFN_ARGS)
1607 {
1608 int t, error;
1609 struct sysctlnode node;
1610
1611 node = *rnode;
1612 node.sysctl_data = &t;
1613 t = *(int *)rnode->sysctl_data;
1614 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1615 if (error || newp == NULL)
1616 return (error);
1617
1618 if (t < 0)
1619 return EINVAL;
1620 if (rnode->sysctl_data == &bufcache) {
1621 if (t > 100)
1622 return (EINVAL);
1623 bufcache = t;
1624 buf_setwm();
1625 } else if (rnode->sysctl_data == &bufmem_lowater) {
1626 if (bufmem_hiwater - t < 16)
1627 return (EINVAL);
1628 bufmem_lowater = t;
1629 } else if (rnode->sysctl_data == &bufmem_hiwater) {
1630 if (t - bufmem_lowater < 16)
1631 return (EINVAL);
1632 bufmem_hiwater = t;
1633 } else
1634 return (EINVAL);
1635
1636 /* Drain until below new high water mark */
1637 while ((t = bufmem - bufmem_hiwater) >= 0) {
1638 if (buf_drain(t / (2 * 1024)) <= 0)
1639 break;
1640 }
1641
1642 return 0;
1643 }
1644
1645 SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1646 {
1647
1648 sysctl_createv(clog, 0, NULL, NULL,
1649 CTLFLAG_PERMANENT,
1650 CTLTYPE_NODE, "kern", NULL,
1651 NULL, 0, NULL, 0,
1652 CTL_KERN, CTL_EOL);
1653 sysctl_createv(clog, 0, NULL, NULL,
1654 CTLFLAG_PERMANENT,
1655 CTLTYPE_NODE, "buf",
1656 SYSCTL_DESCR("Kernel buffer cache information"),
1657 sysctl_dobuf, 0, NULL, 0,
1658 CTL_KERN, KERN_BUF, CTL_EOL);
1659 }
1660
1661 SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
1662 {
1663
1664 sysctl_createv(clog, 0, NULL, NULL,
1665 CTLFLAG_PERMANENT,
1666 CTLTYPE_NODE, "vm", NULL,
1667 NULL, 0, NULL, 0,
1668 CTL_VM, CTL_EOL);
1669
1670 sysctl_createv(clog, 0, NULL, NULL,
1671 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1672 CTLTYPE_INT, "bufcache",
1673 SYSCTL_DESCR("Percentage of physical memory to use for "
1674 "buffer cache"),
1675 sysctl_bufvm_update, 0, &bufcache, 0,
1676 CTL_VM, CTL_CREATE, CTL_EOL);
1677 sysctl_createv(clog, 0, NULL, NULL,
1678 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1679 CTLTYPE_INT, "bufmem",
1680 SYSCTL_DESCR("Amount of kernel memory used by buffer "
1681 "cache"),
1682 NULL, 0, &bufmem, 0,
1683 CTL_VM, CTL_CREATE, CTL_EOL);
1684 sysctl_createv(clog, 0, NULL, NULL,
1685 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1686 CTLTYPE_INT, "bufmem_lowater",
1687 SYSCTL_DESCR("Minimum amount of kernel memory to "
1688 "reserve for buffer cache"),
1689 sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1690 CTL_VM, CTL_CREATE, CTL_EOL);
1691 sysctl_createv(clog, 0, NULL, NULL,
1692 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1693 CTLTYPE_INT, "bufmem_hiwater",
1694 SYSCTL_DESCR("Maximum amount of kernel memory to use "
1695 "for buffer cache"),
1696 sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1697 CTL_VM, CTL_CREATE, CTL_EOL);
1698 }
1699
1700 #ifdef DEBUG
1701 /*
1702 * Print out statistics on the current allocation of the buffer pool.
1703 * Can be enabled to print out on every ``sync'' by setting "syncprt"
1704 * in vfs_syscalls.c using sysctl.
1705 */
1706 void
1707 vfs_bufstats(void)
1708 {
1709 int s, i, j, count;
1710 struct buf *bp;
1711 struct bqueue *dp;
1712 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1713 static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1714
1715 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1716 count = 0;
1717 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1718 counts[j] = 0;
1719 s = splbio();
1720 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
1721 counts[bp->b_bufsize/PAGE_SIZE]++;
1722 count++;
1723 }
1724 splx(s);
1725 printf("%s: total-%d", bname[i], count);
1726 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1727 if (counts[j] != 0)
1728 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1729 printf("\n");
1730 }
1731 }
1732 #endif /* DEBUG */
1733
1734 /* ------------------------------ */
1735
1736 static POOL_INIT(bufiopool, sizeof(struct buf), 0, 0, 0, "biopl", NULL);
1737
1738 static struct buf *
1739 getiobuf1(int prflags)
1740 {
1741 struct buf *bp;
1742 int s;
1743
1744 s = splbio();
1745 bp = pool_get(&bufiopool, prflags);
1746 splx(s);
1747 if (bp != NULL) {
1748 BUF_INIT(bp);
1749 }
1750 return bp;
1751 }
1752
1753 struct buf *
1754 getiobuf(void)
1755 {
1756
1757 return getiobuf1(PR_WAITOK);
1758 }
1759
1760 struct buf *
1761 getiobuf_nowait(void)
1762 {
1763
1764 return getiobuf1(PR_NOWAIT);
1765 }
1766
1767 void
1768 putiobuf(struct buf *bp)
1769 {
1770 int s;
1771
1772 s = splbio();
1773 pool_put(&bufiopool, bp);
1774 splx(s);
1775 }
1776
1777 /*
1778 * nestiobuf_iodone: b_iodone callback for nested buffers.
1779 */
1780
1781 static void
1782 nestiobuf_iodone(struct buf *bp)
1783 {
1784 struct buf *mbp = bp->b_private;
1785 int error;
1786 int donebytes;
1787
1788 KASSERT(bp->b_bcount <= bp->b_bufsize);
1789 KASSERT(mbp != bp);
1790
1791 error = 0;
1792 if ((bp->b_flags & B_ERROR) != 0) {
1793 error = EIO;
1794 /* check if an error code was returned */
1795 if (bp->b_error)
1796 error = bp->b_error;
1797 } else if ((bp->b_bcount < bp->b_bufsize) || (bp->b_resid > 0)) {
1798 /*
1799 * Not all got transferred, raise an error. We have no way to
1800 * propagate these conditions to mbp.
1801 */
1802 error = EIO;
1803 }
1804
1805 donebytes = bp->b_bufsize;
1806
1807 putiobuf(bp);
1808 nestiobuf_done(mbp, donebytes, error);
1809 }
1810
1811 /*
1812 * nestiobuf_setup: setup a "nested" buffer.
1813 *
1814 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
1815 * => 'bp' should be a buffer allocated by getiobuf or getiobuf_nowait.
1816 * => 'offset' is a byte offset in the master buffer.
1817 * => 'size' is a size in bytes of this nested buffer.
1818 */
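/*
 * Hypothetical usage sketch (not taken from any in-tree driver): a driver
 * splitting one master transfer into two sub-transfers might do roughly:
 *
 *	struct buf *b1 = getiobuf();
 *	struct buf *b2 = getiobuf();
 *	int half = mbp->b_bcount / 2;
 *
 *	nestiobuf_setup(mbp, b1, 0, half);
 *	nestiobuf_setup(mbp, b2, half, mbp->b_bcount - half);
 *	... set b_blkno on each and hand them to VOP_STRATEGY() ...
 *
 * Each nested buffer completes through nestiobuf_iodone(), which calls
 * nestiobuf_done(); biodone(mbp) fires once mbp->b_resid reaches zero.
 */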
1819
1820 void
1821 nestiobuf_setup(struct buf *mbp, struct buf *bp, int offset, size_t size)
1822 {
1823 const int b_read = mbp->b_flags & B_READ;
1824 struct vnode *vp = mbp->b_vp;
1825
1826 KASSERT(mbp->b_bcount >= offset + size);
1827 bp->b_vp = vp;
1828 bp->b_flags = B_BUSY | B_CALL | B_ASYNC | b_read;
1829 bp->b_iodone = nestiobuf_iodone;
1830 bp->b_data = mbp->b_data + offset;
1831 bp->b_resid = bp->b_bcount = size;
1832 bp->b_bufsize = bp->b_bcount;
1833 bp->b_private = mbp;
1834 BIO_COPYPRIO(bp, mbp);
1835 if (!b_read && vp != NULL) {
1836 int s;
1837
1838 s = splbio();
1839 V_INCR_NUMOUTPUT(vp);
1840 splx(s);
1841 }
1842 }
1843
1844 /*
1845 * nestiobuf_done: propagate completion to the master buffer.
1846 *
1847 * => 'donebytes' specifies how many bytes of the 'mbp' have been completed.
1848 * => 'error' is the errno(2) with which those 'donebytes' were completed.
1849 */
1850
1851 void
1852 nestiobuf_done(struct buf *mbp, int donebytes, int error)
1853 {
1854 int s;
1855
1856 if (donebytes == 0) {
1857 return;
1858 }
1859 s = splbio();
1860 KASSERT(mbp->b_resid >= donebytes);
1861 if (error) {
1862 mbp->b_flags |= B_ERROR;
1863 mbp->b_error = error;
1864 }
1865 mbp->b_resid -= donebytes;
1866 if (mbp->b_resid == 0) {
1867 if ((mbp->b_flags & B_ERROR) != 0) {
1868 mbp->b_resid = mbp->b_bcount; /* be conservative */
1869 }
1870 biodone(mbp);
1871 }
1872 splx(s);
1873 }
1874