/*	$NetBSD: vfs_bio.c,v 1.99 2003/12/02 04:18:19 dbj Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.99 2003/12/02 04:18:19 dbj Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
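
/*
 * Illustrative note (not compiled): BUFHASH folds a vnode pointer and a
 * logical block number into an index into bufhashtbl[].  A lookup walks
 * the selected chain, roughly:
 *
 *	struct bufhashhdr *hp = BUFHASH(dvp, lbn);
 *	struct buf *bp;
 *
 *	LIST_FOREACH(bp, hp, b_hash)
 *		if (bp->b_lblkno == lbn && bp->b_vp == dvp)
 *			break;
 *
 * The ">> 8" discards the low, mostly-constant bits of the pointer
 * before the block number is mixed in; see incore() below for the
 * real lookup.
 */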
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;
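
/*
 * Illustrative note (not compiled): code needing a bare I/O control
 * block, rather than a buffer cache buffer, allocates from this pool,
 * along the lines of:
 *
 *	struct buf *bp = pool_get(&bufpool, PR_WAITOK);
 *	...set up and issue the transfer...
 *	pool_put(&bufpool, bp);
 */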

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
int checkfreelist(struct buf *, struct bqueues *);
int
checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
	    checkfreelist(bp, &bufqueues[BQ_AGE]) ||
	    checkfreelist(bp, &bufqueues[BQ_LRU]) ||
	    checkfreelist(bp, &bufqueues[BQ_LOCKED]) ||
	    checkfreelist(bp, &bufqueues[BQ_EMPTY]));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
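	/*
	 * Sketch (illustrative only, not compiled): with the <sys/queue.h>
	 * TAILQ layout, the head's tqh_last points at the tqe_next field
	 * of the final element, so when bp is the last buffer on some
	 * queue, exactly one head dp satisfies
	 *
	 *	dp->tqh_last == &bp->b_freelist.tqe_next
	 *
	 * which is the test the loop below performs.
	 */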
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	u_int i, base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
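
/*
 * Example (illustrative only, not compiled): a typical synchronous read
 * of one logical block, releasing the buffer when done.  "vp", "lbn" and
 * "size" stand in for a caller's vnode, block number and block size:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...inspect bp->b_data...
 *	brelse(bp);
 */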

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait for the first (synchronous) read to complete, and return
	 * its result. */
	return (biowait(bp));
}
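
/*
 * Example (illustrative only, not compiled): read block "lbn" now and
 * hint that the next block will probably be wanted soon:
 *
 *	daddr_t rablk = lbn + 1;
 *	int rasize = size;
 *
 *	error = breadn(vp, lbn, size, &rablk, &rasize, 1, NOCRED, &bp);
 */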

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
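
/*
 * Example (illustrative only, not compiled): a small update that will
 * likely be extended shortly, such as appending an entry to a directory
 * block, is typically read, modified and written back lazily:
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...modify bp->b_data...
 *	bdwrite(bp);
 */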

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

529 1.31 cgd /*
530 1.59 fvdl * Same as first half of bdwrite, mark buffer dirty, but do not release it.
531 1.88 pk * Call at splbio() and with the buffer interlock locked.
532 1.88 pk * Note: called only from biodone() through ffs softdep's bioops.io_complete()
533 1.59 fvdl */
void
bdirty(bp)
	struct buf *bp;
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_EMPTY]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size);
	}
	return (bp);
}
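
/*
 * Example (illustrative only, not compiled): when a block will be
 * completely overwritten, callers use getblk() directly so that no
 * read from disk is needed:
 *
 *	bp = getblk(vp, lbn, size, 0, 0);
 *	memset(bp->b_data, 0, size);
 *	...fill in bp->b_data...
 *	bwrite(bp);
 */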

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		s = splbio();
		simple_lock(&bqueue_slock);
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		simple_unlock(&nbp->b_interlock);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif
		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		simple_lock(&bqueue_slock);
		if ((nbp = TAILQ_FIRST(&bufqueues[BQ_EMPTY])) == NULL) {
			/* No free buffer head */
			simple_unlock(&bqueue_slock);
			splx(s);
			goto out;
		}
		/* No need to lock nbp since it came from the empty queue */
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY | B_INVAL);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {		/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}
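
/*
 * Example (illustrative only, not compiled): a subsystem that wants a
 * private completion routine instead of the default wakeup sets B_CALL
 * before starting the transfer; "my_iodone" is a hypothetical callback:
 *
 *	bp->b_iodone = my_iodone;
 *	SET(bp->b_flags, B_CALL | B_ASYNC);
 *	VOP_STRATEGY(bp);
 *
 * biodone() then clears B_CALL and invokes my_iodone(bp); the callback
 * is responsible for the buffer from that point on.
 */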

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */