/*	$NetBSD: vfs_bio.c,v 1.37 1995/07/12 07:39:00 cgd Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

__inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not contain valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		curproc->p_stats->p_ru.ru_inblock++;		/* XXX */
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
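
/*
 * Illustrative sketch (not part of the original source): a typical
 * file-system caller fetches a block with bread(), checks the error,
 * and releases the buffer with brelse() when done with the data.
 * Note that bread() hands back the buffer even on error, so it must
 * still be released.  The vnode 'vp', logical block 'lbn', and block
 * size 'bsize' below are hypothetical stand-ins.
 */
#if 0
	struct buf *bp;
	int error;

	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
		brelse(bp);		/* buffer is returned even on error */
		return (error);
	}
	/* ... use bp->b_data ... */
	brelse(bp);			/* give the buffer back to the free lists */
#endif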

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}
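
/*
 * Illustrative sketch (not part of the original source): a caller doing
 * sequential reads can prime the cache with the next block while waiting
 * only for the block it needs now.  'vp', 'lbn', and 'bsize' are
 * hypothetical stand-ins.
 */
#if 0
	struct buf *bp;
	daddr_t ralbn = lbn + 1;	/* read ahead one block */
	int rasize = bsize;
	int error;

	error = breadn(vp, lbn, bsize, &ralbn, &rasize, 1, NOCRED, &bp);
	if (error == 0) {
		/* ... use bp->b_data; the read-ahead completes on its own ... */
		brelse(bp);
	}
#endif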

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed;

	/* Remember buffer type, to switch on it later. */
	sync = !ISSET(bp->b_flags, B_ASYNC);
#if 0
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}
#endif
	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	if (!sync) {
		/*
		 * If not synchronous, pay for the I/O operation and make
		 * sure the buf is on the correct vnode queue.  We have
		 * to do this now, because if we don't, the vnode may not
		 * be properly notified that its I/O has completed.
		 */
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;
	}

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	SET(bp->b_flags, B_WRITEINPROG);
	bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);

	if (sync) {
		/*
		 * If I/O was synchronous, wait for it to complete.
		 */
		rv = biowait(bp);

		/*
		 * Pay for the I/O operation, if it's not been paid for, and
		 * make sure it's on the correct vnode queue.  (Async
		 * operations were paid for above.)
		 */
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
		reassignbuf(bp, bp->b_vp);
	}

	/* If this is a tape block, write the block now. */
	if (bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bwrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
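
/*
 * Illustrative sketch (not part of the original source): the classic
 * read-modify-delayed-write cycle.  A caller reads a block, changes a
 * few bytes, and uses bdwrite() so the block stays cached dirty and can
 * absorb further updates before it is finally pushed to disk.  'vp',
 * 'lbn', 'bsize', 'newdata', 'off', and 'len' are hypothetical
 * stand-ins.
 */
#if 0
	struct buf *bp;
	int error;

	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	bcopy(newdata, bp->b_data + off, len);	/* modify part of the block */
	bdwrite(bp);				/* mark dirty; no I/O started */
#endif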

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bp = BUFHASH(vp, blkno)->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL))
			break;
	}

	if (bp) {
		SET(bp->b_flags, (B_BUSY | B_CACHE));
		bremfree(bp);
		splx(s);
		allocbuf(bp, size);
	} else {
		splx(s);
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, BUFHASH(vp, blkno));
		allocbuf(bp, size);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	return (bp);
}
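
/*
 * Illustrative sketch (not part of the original source): getblk() is the
 * way to obtain a buffer for a block that will be completely overwritten,
 * since no read from disk is needed first.  The caller fills the buffer
 * and writes it synchronously with bwrite().  'vp', 'lbn', and 'bsize'
 * are hypothetical stand-ins.
 */
#if 0
	struct buf *bp;
	int error;

	bp = getblk(vp, lbn, bsize, 0, 0);	/* cached or empty buffer */
	bzero(bp->b_data, bsize);		/* overwrite the whole block */
	/* ... fill in bp->b_data ... */
	error = bwrite(bp);			/* sync write; releases bp */
#endif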

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}
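
/*
 * Illustrative sketch (not part of the original source): geteblk() hands
 * back a buffer that belongs to no vnode, useful as scratch space for a
 * one-shot transfer.  Marked B_INVAL, it will not be found by incore(),
 * and brelse() recycles it immediately.  'bsize' is a hypothetical
 * stand-in.
 */
#if 0
	struct buf *bp;

	bp = geteblk(bsize);		/* private scratch buffer */
	/* ... use bp->b_data as temporary storage ... */
	brelse(bp);			/* recycled via the AGE queue */
#endif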

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vm_size_t desired_size;
	int s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
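
/*
 * Illustrative sketch (not part of the original source): a file system
 * growing a partial block (e.g. extending the last fragment of a file)
 * can re-size the cached buffer in place with allocbuf().  The caller
 * must fill the newly-added bytes itself.  'vp', 'lbn', 'osize', and
 * 'nsize' are hypothetical stand-ins, with nsize > osize.
 */
#if 0
	struct buf *bp;
	int error;

	if ((error = bread(vp, lbn, osize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	allocbuf(bp, nsize);				/* grow to the new size */
	bzero(bp->b_data + osize, nsize - osize);	/* zero the new bytes */
	bdwrite(bp);
#endif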

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);
	splx(s);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
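
/*
 * Illustrative sketch (not part of the original source): the other half
 * of the protocol.  A disk driver's interrupt handler fills in the
 * result fields and calls biodone(), which wakes a biowait()er, releases
 * an async buffer, or fires a B_CALL callback.  'xs' and its members are
 * hypothetical driver state.
 */
#if 0
	struct buf *bp = xs->bp;	/* request the controller completed */

	if (xs->error) {
		bp->b_error = EIO;
		SET(bp->b_flags, B_ERROR);
	}
	bp->b_resid = xs->bytes_left;	/* 0 on a full transfer */
	biodone(bp);			/* hand the buffer back to the cache */
#endif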

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */