/*	$NetBSD: vfs_bio.c,v 1.99 2003/12/02 04:18:19 dbj Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.99 2003/12/02 04:18:19 dbj Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))
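
/*
 * Example use (illustrative only): marking a buffer busy, testing the
 * flag, and clearing it again.
 *
 *	SET(bp->b_flags, B_BUSY);
 *	if (ISSET(bp->b_flags, B_BUSY))
 *		CLR(bp->b_flags, B_BUSY);
 */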

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
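
/*
 * Illustrative sketch of walking the chain BUFHASH() selects for a
 * (vnode, logical block) pair; this is essentially what incore()
 * below does.  "vp" and "lbn" are assumed caller-supplied.
 *
 *	struct buf *bp;
 *
 *	LIST_FOREACH(bp, BUFHASH(vp, lbn), b_hash)
 *		if (bp->b_lblkno == lbn && bp->b_vp == vp)
 *			break;
 */
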
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
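
/*
 * A minimal sketch of the lock order documented above: raise to
 * splbio(), take bqueue_slock, and only then a buffer's b_interlock.
 * This is the pattern brelse() and getblk() below follow.
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	simple_lock(&bp->b_interlock);
 *	... manipulate the queues and bp ...
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */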

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
int checkfreelist(struct buf *, struct bqueues *);
int
checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
	    checkfreelist(bp, &bufqueues[BQ_AGE]) ||
	    checkfreelist(bp, &bufqueues[BQ_LRU]) ||
	    checkfreelist(bp, &bufqueues[BQ_LOCKED]) ||
	    checkfreelist(bp, &bufqueues[BQ_EMPTY]));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
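
/*
 * For reference, the <sys/queue.h> layout the loop above relies on:
 * when bp is the last element of a tailq, TAILQ_NEXT(bp, b_freelist)
 * is NULL and that queue's head has
 * dp->tqh_last == &bp->b_freelist.tqe_next, so scanning the four
 * heads identifies the owning queue without a per-buffer back
 * pointer.  This describes the existing tailq implementation; it is
 * not an additional interface.
 */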

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	u_int i, base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
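
/*
 * Typical use, as an illustrative sketch (every name other than
 * bread()/brelse() is a caller-side assumption):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data, then release; the data stays cached ...
 *	brelse(bp);
 */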

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}
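
/*
 * Sketch of a sequential reader using one-block read-ahead (the
 * variables are assumptions for illustration):
 *
 *	daddr_t ra_blk = lbn + 1;
 *	int ra_size = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &ra_blk, &ra_size, 1,
 *	    NOCRED, &bp);
 */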

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
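
/*
 * Illustrative write-back sketch: fetch the cached block, modify it,
 * and push it out synchronously.  The getblk() arguments here are
 * assumptions; on a synchronous write bwrite() waits for completion
 * and releases the buffer itself.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	... update bp->b_data ...
 *	error = bwrite(bp);
 */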

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
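
/*
 * Delayed-write sketch (illustrative): a small update that is likely
 * to be followed by more writes to the same block, so it is queued
 * rather than written.  bdwrite() marks the buffer B_DELWRI and
 * releases it without starting any I/O.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	... modify part of bp->b_data ...
 *	bdwrite(bp);
 */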

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_EMPTY]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU.  This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size);
	}
	return (bp);
}
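
/*
 * Sketch of a caller that will overwrite the whole block and so needs
 * no prior read (names other than getblk()/bawrite() are assumptions):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memset(bp->b_data, 0, bsize);
 *	... fill in the new contents ...
 *	bawrite(bp);
 */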

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0)) == NULL)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}
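
/*
 * Sketch: a short-lived scratch buffer with no vnode association
 * (illustrative only).  Since geteblk() marks the buffer B_INVAL,
 * brelse() will return it to the head of the AGE queue.
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_data as temporary storage ...
 *	brelse(bp);
 */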

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		s = splbio();
		simple_lock(&bqueue_slock);
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		simple_unlock(&nbp->b_interlock);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif
		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		simple_lock(&bqueue_slock);
		if ((nbp = TAILQ_FIRST(&bufqueues[BQ_EMPTY])) == NULL) {
			/* No free buffer head */
			simple_unlock(&bqueue_slock);
			splx(s);
			goto out;
		}
		/* No need to lock nbp since it came from the empty queue */
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY | B_INVAL);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}
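
/*
 * Caller protocol sketch, matching geteblk() and allocbuf() above:
 * getnewbuf() may return NULL after sleeping, so callers loop, and
 * the queue lock must already be held.  The returned buffer's
 * b_interlock is held.
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	while ((bp = getnewbuf(0, 0)) == NULL)
 *		;
 *	... set up bp ...
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */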

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* a write completed */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling the
	 * b_iodone callback, as the buffer may no longer be valid when
	 * the callback returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {		/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}
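
/*
 * Driver-side sketch (illustrative): completing a transfer from
 * interrupt context.  A driver that wants a callback instead of a
 * wakeup sets B_CALL and b_iodone before starting the I/O; biodone()
 * then invokes the callback.
 *
 *	if (hard_error) {
 *		bp->b_error = EIO;
 *		SET(bp->b_flags, B_ERROR);
 *	}
 *	bp->b_resid = 0;
 *	biodone(bp);
 */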

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */